Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi/pxa2xx: break out the private DMA API usage into a separate file

The PXA SPI driver uses PXA platform specific private DMA implementation
which does not work on non-PXA platforms. In order to use this driver on
other platforms we break out the private DMA implementation into a separate
file that gets compiled only when CONFIG_SPI_PXA2XX_PXADMA is set. The DMA
functions are stubbed out if there is no DMA implementation selected (i.e.
we are building on a non-PXA platform).

While we are there we can kill the dummy DMA bits in pxa2xx_spi.h as they
are not needed anymore for CE4100.

Once this is done we can add the generic DMA engine support to the driver
that allows usage of any DMA controller that implements DMA engine API.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Lu Cao <lucao@marvell.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>

authored by

Mika Westerberg and committed by
Mark Brown
cd7bed00 d560040f

+711 -647
+6
drivers/spi/Kconfig
··· 297 297 help 298 298 This selects a driver for the PPC4xx SPI Controller. 299 299 300 + config SPI_PXA2XX_PXADMA 301 + bool "PXA2xx SSP legacy PXA DMA API support" 302 + depends on SPI_PXA2XX && ARCH_PXA 303 + help 304 + Enable PXA private legacy DMA API support. 305 + 300 306 config SPI_PXA2XX 301 307 tristate "PXA2xx SSP SPI master" 302 308 depends on ARCH_PXA || PCI
+3 -1
drivers/spi/Makefile
··· 47 47 obj-$(CONFIG_SPI_ORION) += spi-orion.o 48 48 obj-$(CONFIG_SPI_PL022) += spi-pl022.o 49 49 obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o 50 - obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o 50 + spi-pxa2xx-platform-objs := spi-pxa2xx.o 51 + spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o 52 + obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o 51 53 obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o 52 54 obj-$(CONFIG_SPI_RSPI) += spi-rspi.o 53 55 obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+490
drivers/spi/spi-pxa2xx-pxadma.c
··· 1 + /* 2 + * PXA2xx SPI private DMA support. 3 + * 4 + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 + */ 20 + 21 + #include <linux/init.h> 22 + #include <linux/delay.h> 23 + #include <linux/device.h> 24 + #include <linux/dma-mapping.h> 25 + #include <linux/pxa2xx_ssp.h> 26 + #include <linux/spi/spi.h> 27 + #include <linux/spi/pxa2xx_spi.h> 28 + 29 + #include "spi-pxa2xx.h" 30 + 31 + #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 32 + #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 33 + 34 + bool pxa2xx_spi_dma_is_possible(size_t len) 35 + { 36 + /* Try to map dma buffer and do a dma transfer if successful, but 37 + * only if the length is non-zero and less than MAX_DMA_LEN. 38 + * 39 + * Zero-length non-descriptor DMA is illegal on PXA2xx; force use 40 + * of PIO instead. Care is needed above because the transfer may 41 + * have have been passed with buffers that are already dma mapped. 42 + * A zero-length transfer in PIO mode will not try to write/read 43 + * to/from the buffers 44 + * 45 + * REVISIT large transfers are exactly where we most want to be 46 + * using DMA. If this happens much, split those transfers into 47 + * multiple DMA segments rather than forcing PIO. 
48 + */ 49 + return len > 0 && len <= MAX_DMA_LEN; 50 + } 51 + 52 + int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) 53 + { 54 + struct spi_message *msg = drv_data->cur_msg; 55 + struct device *dev = &msg->spi->dev; 56 + 57 + if (!drv_data->cur_chip->enable_dma) 58 + return 0; 59 + 60 + if (msg->is_dma_mapped) 61 + return drv_data->rx_dma && drv_data->tx_dma; 62 + 63 + if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) 64 + return 0; 65 + 66 + /* Modify setup if rx buffer is null */ 67 + if (drv_data->rx == NULL) { 68 + *drv_data->null_dma_buf = 0; 69 + drv_data->rx = drv_data->null_dma_buf; 70 + drv_data->rx_map_len = 4; 71 + } else 72 + drv_data->rx_map_len = drv_data->len; 73 + 74 + 75 + /* Modify setup if tx buffer is null */ 76 + if (drv_data->tx == NULL) { 77 + *drv_data->null_dma_buf = 0; 78 + drv_data->tx = drv_data->null_dma_buf; 79 + drv_data->tx_map_len = 4; 80 + } else 81 + drv_data->tx_map_len = drv_data->len; 82 + 83 + /* Stream map the tx buffer. Always do DMA_TO_DEVICE first 84 + * so we flush the cache *before* invalidating it, in case 85 + * the tx and rx buffers overlap. 
86 + */ 87 + drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 88 + drv_data->tx_map_len, DMA_TO_DEVICE); 89 + if (dma_mapping_error(dev, drv_data->tx_dma)) 90 + return 0; 91 + 92 + /* Stream map the rx buffer */ 93 + drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 94 + drv_data->rx_map_len, DMA_FROM_DEVICE); 95 + if (dma_mapping_error(dev, drv_data->rx_dma)) { 96 + dma_unmap_single(dev, drv_data->tx_dma, 97 + drv_data->tx_map_len, DMA_TO_DEVICE); 98 + return 0; 99 + } 100 + 101 + return 1; 102 + } 103 + 104 + static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) 105 + { 106 + struct device *dev; 107 + 108 + if (!drv_data->dma_mapped) 109 + return; 110 + 111 + if (!drv_data->cur_msg->is_dma_mapped) { 112 + dev = &drv_data->cur_msg->spi->dev; 113 + dma_unmap_single(dev, drv_data->rx_dma, 114 + drv_data->rx_map_len, DMA_FROM_DEVICE); 115 + dma_unmap_single(dev, drv_data->tx_dma, 116 + drv_data->tx_map_len, DMA_TO_DEVICE); 117 + } 118 + 119 + drv_data->dma_mapped = 0; 120 + } 121 + 122 + static int wait_ssp_rx_stall(void const __iomem *ioaddr) 123 + { 124 + unsigned long limit = loops_per_jiffy << 1; 125 + 126 + while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) 127 + cpu_relax(); 128 + 129 + return limit; 130 + } 131 + 132 + static int wait_dma_channel_stop(int channel) 133 + { 134 + unsigned long limit = loops_per_jiffy << 1; 135 + 136 + while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) 137 + cpu_relax(); 138 + 139 + return limit; 140 + } 141 + 142 + static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, 143 + const char *msg) 144 + { 145 + void __iomem *reg = drv_data->ioaddr; 146 + 147 + /* Stop and reset */ 148 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 149 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 150 + write_SSSR_CS(drv_data, drv_data->clear_sr); 151 + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 152 + if (!pxa25x_ssp_comp(drv_data)) 153 + write_SSTO(0, reg); 154 + pxa2xx_spi_flush(drv_data); 155 
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 156 + 157 + pxa2xx_spi_unmap_dma_buffers(drv_data); 158 + 159 + dev_err(&drv_data->pdev->dev, "%s\n", msg); 160 + 161 + drv_data->cur_msg->state = ERROR_STATE; 162 + tasklet_schedule(&drv_data->pump_transfers); 163 + } 164 + 165 + static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) 166 + { 167 + void __iomem *reg = drv_data->ioaddr; 168 + struct spi_message *msg = drv_data->cur_msg; 169 + 170 + /* Clear and disable interrupts on SSP and DMA channels*/ 171 + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 172 + write_SSSR_CS(drv_data, drv_data->clear_sr); 173 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 174 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 175 + 176 + if (wait_dma_channel_stop(drv_data->rx_channel) == 0) 177 + dev_err(&drv_data->pdev->dev, 178 + "dma_handler: dma rx channel stop failed\n"); 179 + 180 + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 181 + dev_err(&drv_data->pdev->dev, 182 + "dma_transfer: ssp rx stall failed\n"); 183 + 184 + pxa2xx_spi_unmap_dma_buffers(drv_data); 185 + 186 + /* update the buffer pointer for the amount completed in dma */ 187 + drv_data->rx += drv_data->len - 188 + (DCMD(drv_data->rx_channel) & DCMD_LENGTH); 189 + 190 + /* read trailing data from fifo, it does not matter how many 191 + * bytes are in the fifo just read until buffer is full 192 + * or fifo is empty, which ever occurs first */ 193 + drv_data->read(drv_data); 194 + 195 + /* return count of what was actually read */ 196 + msg->actual_length += drv_data->len - 197 + (drv_data->rx_end - drv_data->rx); 198 + 199 + /* Transfer delays and chip select release are 200 + * handled in pump_transfers or giveback 201 + */ 202 + 203 + /* Move to next transfer */ 204 + msg->state = pxa2xx_spi_next_transfer(drv_data); 205 + 206 + /* Schedule transfer tasklet */ 207 + tasklet_schedule(&drv_data->pump_transfers); 208 + } 209 + 210 + void pxa2xx_spi_dma_handler(int channel, void *data) 
211 + { 212 + struct driver_data *drv_data = data; 213 + u32 irq_status = DCSR(channel) & DMA_INT_MASK; 214 + 215 + if (irq_status & DCSR_BUSERR) { 216 + 217 + if (channel == drv_data->tx_channel) 218 + pxa2xx_spi_dma_error_stop(drv_data, 219 + "dma_handler: bad bus address on tx channel"); 220 + else 221 + pxa2xx_spi_dma_error_stop(drv_data, 222 + "dma_handler: bad bus address on rx channel"); 223 + return; 224 + } 225 + 226 + /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ 227 + if ((channel == drv_data->tx_channel) 228 + && (irq_status & DCSR_ENDINTR) 229 + && (drv_data->ssp_type == PXA25x_SSP)) { 230 + 231 + /* Wait for rx to stall */ 232 + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 233 + dev_err(&drv_data->pdev->dev, 234 + "dma_handler: ssp rx stall failed\n"); 235 + 236 + /* finish this transfer, start the next */ 237 + pxa2xx_spi_dma_transfer_complete(drv_data); 238 + } 239 + } 240 + 241 + irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) 242 + { 243 + u32 irq_status; 244 + void __iomem *reg = drv_data->ioaddr; 245 + 246 + irq_status = read_SSSR(reg) & drv_data->mask_sr; 247 + if (irq_status & SSSR_ROR) { 248 + pxa2xx_spi_dma_error_stop(drv_data, 249 + "dma_transfer: fifo overrun"); 250 + return IRQ_HANDLED; 251 + } 252 + 253 + /* Check for false positive timeout */ 254 + if ((irq_status & SSSR_TINT) 255 + && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { 256 + write_SSSR(SSSR_TINT, reg); 257 + return IRQ_HANDLED; 258 + } 259 + 260 + if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { 261 + 262 + /* Clear and disable timeout interrupt, do the rest in 263 + * dma_transfer_complete */ 264 + if (!pxa25x_ssp_comp(drv_data)) 265 + write_SSTO(0, reg); 266 + 267 + /* finish this transfer, start the next */ 268 + pxa2xx_spi_dma_transfer_complete(drv_data); 269 + 270 + return IRQ_HANDLED; 271 + } 272 + 273 + /* Opps problem detected */ 274 + return IRQ_NONE; 275 + } 276 + 277 + int pxa2xx_spi_dma_prepare(struct 
driver_data *drv_data, u32 dma_burst) 278 + { 279 + u32 dma_width; 280 + 281 + switch (drv_data->n_bytes) { 282 + case 1: 283 + dma_width = DCMD_WIDTH1; 284 + break; 285 + case 2: 286 + dma_width = DCMD_WIDTH2; 287 + break; 288 + default: 289 + dma_width = DCMD_WIDTH4; 290 + break; 291 + } 292 + 293 + /* Setup rx DMA Channel */ 294 + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 295 + DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; 296 + DTADR(drv_data->rx_channel) = drv_data->rx_dma; 297 + if (drv_data->rx == drv_data->null_dma_buf) 298 + /* No target address increment */ 299 + DCMD(drv_data->rx_channel) = DCMD_FLOWSRC 300 + | dma_width 301 + | dma_burst 302 + | drv_data->len; 303 + else 304 + DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR 305 + | DCMD_FLOWSRC 306 + | dma_width 307 + | dma_burst 308 + | drv_data->len; 309 + 310 + /* Setup tx DMA Channel */ 311 + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 312 + DSADR(drv_data->tx_channel) = drv_data->tx_dma; 313 + DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; 314 + if (drv_data->tx == drv_data->null_dma_buf) 315 + /* No source address increment */ 316 + DCMD(drv_data->tx_channel) = DCMD_FLOWTRG 317 + | dma_width 318 + | dma_burst 319 + | drv_data->len; 320 + else 321 + DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR 322 + | DCMD_FLOWTRG 323 + | dma_width 324 + | dma_burst 325 + | drv_data->len; 326 + 327 + /* Enable dma end irqs on SSP to detect end of transfer */ 328 + if (drv_data->ssp_type == PXA25x_SSP) 329 + DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; 330 + 331 + return 0; 332 + } 333 + 334 + void pxa2xx_spi_dma_start(struct driver_data *drv_data) 335 + { 336 + DCSR(drv_data->rx_channel) |= DCSR_RUN; 337 + DCSR(drv_data->tx_channel) |= DCSR_RUN; 338 + } 339 + 340 + int pxa2xx_spi_dma_setup(struct driver_data *drv_data) 341 + { 342 + struct device *dev = &drv_data->pdev->dev; 343 + struct ssp_device *ssp = drv_data->ssp; 344 + 345 + /* Get two DMA channels (rx and tx) */ 346 + 
drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", 347 + DMA_PRIO_HIGH, 348 + pxa2xx_spi_dma_handler, 349 + drv_data); 350 + if (drv_data->rx_channel < 0) { 351 + dev_err(dev, "problem (%d) requesting rx channel\n", 352 + drv_data->rx_channel); 353 + return -ENODEV; 354 + } 355 + drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", 356 + DMA_PRIO_MEDIUM, 357 + pxa2xx_spi_dma_handler, 358 + drv_data); 359 + if (drv_data->tx_channel < 0) { 360 + dev_err(dev, "problem (%d) requesting tx channel\n", 361 + drv_data->tx_channel); 362 + pxa_free_dma(drv_data->rx_channel); 363 + return -ENODEV; 364 + } 365 + 366 + DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; 367 + DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; 368 + 369 + return 0; 370 + } 371 + 372 + void pxa2xx_spi_dma_release(struct driver_data *drv_data) 373 + { 374 + struct ssp_device *ssp = drv_data->ssp; 375 + 376 + DRCMR(ssp->drcmr_rx) = 0; 377 + DRCMR(ssp->drcmr_tx) = 0; 378 + 379 + if (drv_data->tx_channel != 0) 380 + pxa_free_dma(drv_data->tx_channel); 381 + if (drv_data->rx_channel != 0) 382 + pxa_free_dma(drv_data->rx_channel); 383 + } 384 + 385 + void pxa2xx_spi_dma_resume(struct driver_data *drv_data) 386 + { 387 + if (drv_data->rx_channel != -1) 388 + DRCMR(drv_data->ssp->drcmr_rx) = 389 + DRCMR_MAPVLD | drv_data->rx_channel; 390 + if (drv_data->tx_channel != -1) 391 + DRCMR(drv_data->ssp->drcmr_tx) = 392 + DRCMR_MAPVLD | drv_data->tx_channel; 393 + } 394 + 395 + int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, 396 + struct spi_device *spi, 397 + u8 bits_per_word, u32 *burst_code, 398 + u32 *threshold) 399 + { 400 + struct pxa2xx_spi_chip *chip_info = 401 + (struct pxa2xx_spi_chip *)spi->controller_data; 402 + int bytes_per_word; 403 + int burst_bytes; 404 + int thresh_words; 405 + int req_burst_size; 406 + int retval = 0; 407 + 408 + /* Set the threshold (in registers) to equal the same amount of data 409 + * as represented by burst size (in 
bytes). The computation below 410 + * is (burst_size rounded up to nearest 8 byte, word or long word) 411 + * divided by (bytes/register); the tx threshold is the inverse of 412 + * the rx, so that there will always be enough data in the rx fifo 413 + * to satisfy a burst, and there will always be enough space in the 414 + * tx fifo to accept a burst (a tx burst will overwrite the fifo if 415 + * there is not enough space), there must always remain enough empty 416 + * space in the rx fifo for any data loaded to the tx fifo. 417 + * Whenever burst_size (in bytes) equals bits/word, the fifo threshold 418 + * will be 8, or half the fifo; 419 + * The threshold can only be set to 2, 4 or 8, but not 16, because 420 + * to burst 16 to the tx fifo, the fifo would have to be empty; 421 + * however, the minimum fifo trigger level is 1, and the tx will 422 + * request service when the fifo is at this level, with only 15 spaces. 423 + */ 424 + 425 + /* find bytes/word */ 426 + if (bits_per_word <= 8) 427 + bytes_per_word = 1; 428 + else if (bits_per_word <= 16) 429 + bytes_per_word = 2; 430 + else 431 + bytes_per_word = 4; 432 + 433 + /* use struct pxa2xx_spi_chip->dma_burst_size if available */ 434 + if (chip_info) 435 + req_burst_size = chip_info->dma_burst_size; 436 + else { 437 + switch (chip->dma_burst_size) { 438 + default: 439 + /* if the default burst size is not set, 440 + * do it now */ 441 + chip->dma_burst_size = DCMD_BURST8; 442 + case DCMD_BURST8: 443 + req_burst_size = 8; 444 + break; 445 + case DCMD_BURST16: 446 + req_burst_size = 16; 447 + break; 448 + case DCMD_BURST32: 449 + req_burst_size = 32; 450 + break; 451 + } 452 + } 453 + if (req_burst_size <= 8) { 454 + *burst_code = DCMD_BURST8; 455 + burst_bytes = 8; 456 + } else if (req_burst_size <= 16) { 457 + if (bytes_per_word == 1) { 458 + /* don't burst more than 1/2 the fifo */ 459 + *burst_code = DCMD_BURST8; 460 + burst_bytes = 8; 461 + retval = 1; 462 + } else { 463 + *burst_code = DCMD_BURST16; 464 + 
burst_bytes = 16; 465 + } 466 + } else { 467 + if (bytes_per_word == 1) { 468 + /* don't burst more than 1/2 the fifo */ 469 + *burst_code = DCMD_BURST8; 470 + burst_bytes = 8; 471 + retval = 1; 472 + } else if (bytes_per_word == 2) { 473 + /* don't burst more than 1/2 the fifo */ 474 + *burst_code = DCMD_BURST16; 475 + burst_bytes = 16; 476 + retval = 1; 477 + } else { 478 + *burst_code = DCMD_BURST32; 479 + burst_bytes = 32; 480 + } 481 + } 482 + 483 + thresh_words = burst_bytes / bytes_per_word; 484 + 485 + /* thresh_words will be between 2 and 8 */ 486 + *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) 487 + | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); 488 + 489 + return retval; 490 + }
+27 -566
drivers/spi/spi-pxa2xx.c
··· 24 24 #include <linux/interrupt.h> 25 25 #include <linux/platform_device.h> 26 26 #include <linux/spi/pxa2xx_spi.h> 27 - #include <linux/dma-mapping.h> 28 27 #include <linux/spi/spi.h> 29 28 #include <linux/workqueue.h> 30 29 #include <linux/delay.h> ··· 35 36 #include <asm/irq.h> 36 37 #include <asm/delay.h> 37 38 39 + #include "spi-pxa2xx.h" 38 40 39 41 MODULE_AUTHOR("Stephen Street"); 40 42 MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); ··· 45 45 #define MAX_BUSES 3 46 46 47 47 #define TIMOUT_DFLT 1000 48 - 49 - #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 50 - #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 51 - #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) 52 - #define MAX_DMA_LEN 8191 53 - #define DMA_ALIGNMENT 8 54 48 55 49 /* 56 50 * for testing SSCR1 changes that require SSP restart, basically ··· 59 65 | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \ 60 66 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ 61 67 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 62 - 63 - #define DEFINE_SSP_REG(reg, off) \ 64 - static inline u32 read_##reg(void const __iomem *p) \ 65 - { return __raw_readl(p + (off)); } \ 66 - \ 67 - static inline void write_##reg(u32 v, void __iomem *p) \ 68 - { __raw_writel(v, p + (off)); } 69 - 70 - DEFINE_SSP_REG(SSCR0, 0x00) 71 - DEFINE_SSP_REG(SSCR1, 0x04) 72 - DEFINE_SSP_REG(SSSR, 0x08) 73 - DEFINE_SSP_REG(SSITR, 0x0c) 74 - DEFINE_SSP_REG(SSDR, 0x10) 75 - DEFINE_SSP_REG(SSTO, 0x28) 76 - DEFINE_SSP_REG(SSPSP, 0x2c) 77 - 78 - #define START_STATE ((void*)0) 79 - #define RUNNING_STATE ((void*)1) 80 - #define DONE_STATE ((void*)2) 81 - #define ERROR_STATE ((void*)-1) 82 - 83 - struct driver_data { 84 - /* Driver model hookup */ 85 - struct platform_device *pdev; 86 - 87 - /* SSP Info */ 88 - struct ssp_device *ssp; 89 - 90 - /* SPI framework hookup */ 91 - enum pxa_ssp_type ssp_type; 92 - struct spi_master *master; 93 - 94 - /* PXA hookup */ 95 - struct pxa2xx_spi_master *master_info; 96 - 97 - /* DMA setup 
stuff */ 98 - int rx_channel; 99 - int tx_channel; 100 - u32 *null_dma_buf; 101 - 102 - /* SSP register addresses */ 103 - void __iomem *ioaddr; 104 - u32 ssdr_physical; 105 - 106 - /* SSP masks*/ 107 - u32 dma_cr1; 108 - u32 int_cr1; 109 - u32 clear_sr; 110 - u32 mask_sr; 111 - 112 - /* Maximun clock rate */ 113 - unsigned long max_clk_rate; 114 - 115 - /* Message Transfer pump */ 116 - struct tasklet_struct pump_transfers; 117 - 118 - /* Current message transfer state info */ 119 - struct spi_message* cur_msg; 120 - struct spi_transfer* cur_transfer; 121 - struct chip_data *cur_chip; 122 - size_t len; 123 - void *tx; 124 - void *tx_end; 125 - void *rx; 126 - void *rx_end; 127 - int dma_mapped; 128 - dma_addr_t rx_dma; 129 - dma_addr_t tx_dma; 130 - size_t rx_map_len; 131 - size_t tx_map_len; 132 - u8 n_bytes; 133 - u32 dma_width; 134 - int (*write)(struct driver_data *drv_data); 135 - int (*read)(struct driver_data *drv_data); 136 - irqreturn_t (*transfer_handler)(struct driver_data *drv_data); 137 - void (*cs_control)(u32 command); 138 - }; 139 - 140 - struct chip_data { 141 - u32 cr0; 142 - u32 cr1; 143 - u32 psp; 144 - u32 timeout; 145 - u8 n_bytes; 146 - u32 dma_width; 147 - u32 dma_burst_size; 148 - u32 threshold; 149 - u32 dma_threshold; 150 - u8 enable_dma; 151 - u8 bits_per_word; 152 - u32 speed_hz; 153 - union { 154 - int gpio_cs; 155 - unsigned int frm; 156 - }; 157 - int gpio_cs_inverted; 158 - int (*write)(struct driver_data *drv_data); 159 - int (*read)(struct driver_data *drv_data); 160 - void (*cs_control)(u32 command); 161 - }; 162 68 163 69 static void cs_assert(struct driver_data *drv_data) 164 70 { ··· 94 200 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); 95 201 } 96 202 97 - static void write_SSSR_CS(struct driver_data *drv_data, u32 val) 98 - { 99 - void __iomem *reg = drv_data->ioaddr; 100 - 101 - if (drv_data->ssp_type == CE4100_SSP) 102 - val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 103 - 104 - write_SSSR(val, reg); 105 - } 106 - 
107 - static int pxa25x_ssp_comp(struct driver_data *drv_data) 108 - { 109 - if (drv_data->ssp_type == PXA25x_SSP) 110 - return 1; 111 - if (drv_data->ssp_type == CE4100_SSP) 112 - return 1; 113 - return 0; 114 - } 115 - 116 - static int flush(struct driver_data *drv_data) 203 + int pxa2xx_spi_flush(struct driver_data *drv_data) 117 204 { 118 205 unsigned long limit = loops_per_jiffy << 1; 119 206 ··· 220 345 return drv_data->rx == drv_data->rx_end; 221 346 } 222 347 223 - static void *next_transfer(struct driver_data *drv_data) 348 + void *pxa2xx_spi_next_transfer(struct driver_data *drv_data) 224 349 { 225 350 struct spi_message *msg = drv_data->cur_msg; 226 351 struct spi_transfer *trans = drv_data->cur_transfer; ··· 234 359 return RUNNING_STATE; 235 360 } else 236 361 return DONE_STATE; 237 - } 238 - 239 - static int map_dma_buffers(struct driver_data *drv_data) 240 - { 241 - struct spi_message *msg = drv_data->cur_msg; 242 - struct device *dev = &msg->spi->dev; 243 - 244 - if (!drv_data->cur_chip->enable_dma) 245 - return 0; 246 - 247 - if (msg->is_dma_mapped) 248 - return drv_data->rx_dma && drv_data->tx_dma; 249 - 250 - if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) 251 - return 0; 252 - 253 - /* Modify setup if rx buffer is null */ 254 - if (drv_data->rx == NULL) { 255 - *drv_data->null_dma_buf = 0; 256 - drv_data->rx = drv_data->null_dma_buf; 257 - drv_data->rx_map_len = 4; 258 - } else 259 - drv_data->rx_map_len = drv_data->len; 260 - 261 - 262 - /* Modify setup if tx buffer is null */ 263 - if (drv_data->tx == NULL) { 264 - *drv_data->null_dma_buf = 0; 265 - drv_data->tx = drv_data->null_dma_buf; 266 - drv_data->tx_map_len = 4; 267 - } else 268 - drv_data->tx_map_len = drv_data->len; 269 - 270 - /* Stream map the tx buffer. Always do DMA_TO_DEVICE first 271 - * so we flush the cache *before* invalidating it, in case 272 - * the tx and rx buffers overlap. 
273 - */ 274 - drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 275 - drv_data->tx_map_len, DMA_TO_DEVICE); 276 - if (dma_mapping_error(dev, drv_data->tx_dma)) 277 - return 0; 278 - 279 - /* Stream map the rx buffer */ 280 - drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 281 - drv_data->rx_map_len, DMA_FROM_DEVICE); 282 - if (dma_mapping_error(dev, drv_data->rx_dma)) { 283 - dma_unmap_single(dev, drv_data->tx_dma, 284 - drv_data->tx_map_len, DMA_TO_DEVICE); 285 - return 0; 286 - } 287 - 288 - return 1; 289 - } 290 - 291 - static void unmap_dma_buffers(struct driver_data *drv_data) 292 - { 293 - struct device *dev; 294 - 295 - if (!drv_data->dma_mapped) 296 - return; 297 - 298 - if (!drv_data->cur_msg->is_dma_mapped) { 299 - dev = &drv_data->cur_msg->spi->dev; 300 - dma_unmap_single(dev, drv_data->rx_dma, 301 - drv_data->rx_map_len, DMA_FROM_DEVICE); 302 - dma_unmap_single(dev, drv_data->tx_dma, 303 - drv_data->tx_map_len, DMA_TO_DEVICE); 304 - } 305 - 306 - drv_data->dma_mapped = 0; 307 362 } 308 363 309 364 /* caller already set message->status; dma and pio irqs are blocked */ ··· 288 483 drv_data->cur_chip = NULL; 289 484 } 290 485 291 - static int wait_ssp_rx_stall(void const __iomem *ioaddr) 292 - { 293 - unsigned long limit = loops_per_jiffy << 1; 294 - 295 - while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) 296 - cpu_relax(); 297 - 298 - return limit; 299 - } 300 - 301 - static int wait_dma_channel_stop(int channel) 302 - { 303 - unsigned long limit = loops_per_jiffy << 1; 304 - 305 - while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) 306 - cpu_relax(); 307 - 308 - return limit; 309 - } 310 - 311 - static void dma_error_stop(struct driver_data *drv_data, const char *msg) 312 - { 313 - void __iomem *reg = drv_data->ioaddr; 314 - 315 - /* Stop and reset */ 316 - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 317 - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 318 - write_SSSR_CS(drv_data, drv_data->clear_sr); 319 - write_SSCR1(read_SSCR1(reg) & 
~drv_data->dma_cr1, reg); 320 - if (!pxa25x_ssp_comp(drv_data)) 321 - write_SSTO(0, reg); 322 - flush(drv_data); 323 - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 324 - 325 - unmap_dma_buffers(drv_data); 326 - 327 - dev_err(&drv_data->pdev->dev, "%s\n", msg); 328 - 329 - drv_data->cur_msg->state = ERROR_STATE; 330 - tasklet_schedule(&drv_data->pump_transfers); 331 - } 332 - 333 - static void dma_transfer_complete(struct driver_data *drv_data) 334 - { 335 - void __iomem *reg = drv_data->ioaddr; 336 - struct spi_message *msg = drv_data->cur_msg; 337 - 338 - /* Clear and disable interrupts on SSP and DMA channels*/ 339 - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 340 - write_SSSR_CS(drv_data, drv_data->clear_sr); 341 - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 342 - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 343 - 344 - if (wait_dma_channel_stop(drv_data->rx_channel) == 0) 345 - dev_err(&drv_data->pdev->dev, 346 - "dma_handler: dma rx channel stop failed\n"); 347 - 348 - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 349 - dev_err(&drv_data->pdev->dev, 350 - "dma_transfer: ssp rx stall failed\n"); 351 - 352 - unmap_dma_buffers(drv_data); 353 - 354 - /* update the buffer pointer for the amount completed in dma */ 355 - drv_data->rx += drv_data->len - 356 - (DCMD(drv_data->rx_channel) & DCMD_LENGTH); 357 - 358 - /* read trailing data from fifo, it does not matter how many 359 - * bytes are in the fifo just read until buffer is full 360 - * or fifo is empty, which ever occurs first */ 361 - drv_data->read(drv_data); 362 - 363 - /* return count of what was actually read */ 364 - msg->actual_length += drv_data->len - 365 - (drv_data->rx_end - drv_data->rx); 366 - 367 - /* Transfer delays and chip select release are 368 - * handled in pump_transfers or giveback 369 - */ 370 - 371 - /* Move to next transfer */ 372 - msg->state = next_transfer(drv_data); 373 - 374 - /* Schedule transfer tasklet */ 375 - tasklet_schedule(&drv_data->pump_transfers); 
376 - } 377 - 378 - static void dma_handler(int channel, void *data) 379 - { 380 - struct driver_data *drv_data = data; 381 - u32 irq_status = DCSR(channel) & DMA_INT_MASK; 382 - 383 - if (irq_status & DCSR_BUSERR) { 384 - 385 - if (channel == drv_data->tx_channel) 386 - dma_error_stop(drv_data, 387 - "dma_handler: " 388 - "bad bus address on tx channel"); 389 - else 390 - dma_error_stop(drv_data, 391 - "dma_handler: " 392 - "bad bus address on rx channel"); 393 - return; 394 - } 395 - 396 - /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ 397 - if ((channel == drv_data->tx_channel) 398 - && (irq_status & DCSR_ENDINTR) 399 - && (drv_data->ssp_type == PXA25x_SSP)) { 400 - 401 - /* Wait for rx to stall */ 402 - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 403 - dev_err(&drv_data->pdev->dev, 404 - "dma_handler: ssp rx stall failed\n"); 405 - 406 - /* finish this transfer, start the next */ 407 - dma_transfer_complete(drv_data); 408 - } 409 - } 410 - 411 - static irqreturn_t dma_transfer(struct driver_data *drv_data) 412 - { 413 - u32 irq_status; 414 - void __iomem *reg = drv_data->ioaddr; 415 - 416 - irq_status = read_SSSR(reg) & drv_data->mask_sr; 417 - if (irq_status & SSSR_ROR) { 418 - dma_error_stop(drv_data, "dma_transfer: fifo overrun"); 419 - return IRQ_HANDLED; 420 - } 421 - 422 - /* Check for false positive timeout */ 423 - if ((irq_status & SSSR_TINT) 424 - && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { 425 - write_SSSR(SSSR_TINT, reg); 426 - return IRQ_HANDLED; 427 - } 428 - 429 - if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { 430 - 431 - /* Clear and disable timeout interrupt, do the rest in 432 - * dma_transfer_complete */ 433 - if (!pxa25x_ssp_comp(drv_data)) 434 - write_SSTO(0, reg); 435 - 436 - /* finish this transfer, start the next */ 437 - dma_transfer_complete(drv_data); 438 - 439 - return IRQ_HANDLED; 440 - } 441 - 442 - /* Opps problem detected */ 443 - return IRQ_NONE; 444 - } 445 - 446 486 static void 
reset_sccr1(struct driver_data *drv_data) 447 487 { 448 488 void __iomem *reg = drv_data->ioaddr; ··· 309 659 reset_sccr1(drv_data); 310 660 if (!pxa25x_ssp_comp(drv_data)) 311 661 write_SSTO(0, reg); 312 - flush(drv_data); 662 + pxa2xx_spi_flush(drv_data); 313 663 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 314 664 315 665 dev_err(&drv_data->pdev->dev, "%s\n", msg); ··· 337 687 */ 338 688 339 689 /* Move to next transfer */ 340 - drv_data->cur_msg->state = next_transfer(drv_data); 690 + drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data); 341 691 342 692 /* Schedule transfer tasklet */ 343 693 tasklet_schedule(&drv_data->pump_transfers); ··· 448 798 return drv_data->transfer_handler(drv_data); 449 799 } 450 800 451 - static int set_dma_burst_and_threshold(struct chip_data *chip, 452 - struct spi_device *spi, 453 - u8 bits_per_word, u32 *burst_code, 454 - u32 *threshold) 455 - { 456 - struct pxa2xx_spi_chip *chip_info = 457 - (struct pxa2xx_spi_chip *)spi->controller_data; 458 - int bytes_per_word; 459 - int burst_bytes; 460 - int thresh_words; 461 - int req_burst_size; 462 - int retval = 0; 463 - 464 - /* Set the threshold (in registers) to equal the same amount of data 465 - * as represented by burst size (in bytes). The computation below 466 - * is (burst_size rounded up to nearest 8 byte, word or long word) 467 - * divided by (bytes/register); the tx threshold is the inverse of 468 - * the rx, so that there will always be enough data in the rx fifo 469 - * to satisfy a burst, and there will always be enough space in the 470 - * tx fifo to accept a burst (a tx burst will overwrite the fifo if 471 - * there is not enough space), there must always remain enough empty 472 - * space in the rx fifo for any data loaded to the tx fifo. 
473 - * Whenever burst_size (in bytes) equals bits/word, the fifo threshold 474 - * will be 8, or half the fifo; 475 - * The threshold can only be set to 2, 4 or 8, but not 16, because 476 - * to burst 16 to the tx fifo, the fifo would have to be empty; 477 - * however, the minimum fifo trigger level is 1, and the tx will 478 - * request service when the fifo is at this level, with only 15 spaces. 479 - */ 480 - 481 - /* find bytes/word */ 482 - if (bits_per_word <= 8) 483 - bytes_per_word = 1; 484 - else if (bits_per_word <= 16) 485 - bytes_per_word = 2; 486 - else 487 - bytes_per_word = 4; 488 - 489 - /* use struct pxa2xx_spi_chip->dma_burst_size if available */ 490 - if (chip_info) 491 - req_burst_size = chip_info->dma_burst_size; 492 - else { 493 - switch (chip->dma_burst_size) { 494 - default: 495 - /* if the default burst size is not set, 496 - * do it now */ 497 - chip->dma_burst_size = DCMD_BURST8; 498 - case DCMD_BURST8: 499 - req_burst_size = 8; 500 - break; 501 - case DCMD_BURST16: 502 - req_burst_size = 16; 503 - break; 504 - case DCMD_BURST32: 505 - req_burst_size = 32; 506 - break; 507 - } 508 - } 509 - if (req_burst_size <= 8) { 510 - *burst_code = DCMD_BURST8; 511 - burst_bytes = 8; 512 - } else if (req_burst_size <= 16) { 513 - if (bytes_per_word == 1) { 514 - /* don't burst more than 1/2 the fifo */ 515 - *burst_code = DCMD_BURST8; 516 - burst_bytes = 8; 517 - retval = 1; 518 - } else { 519 - *burst_code = DCMD_BURST16; 520 - burst_bytes = 16; 521 - } 522 - } else { 523 - if (bytes_per_word == 1) { 524 - /* don't burst more than 1/2 the fifo */ 525 - *burst_code = DCMD_BURST8; 526 - burst_bytes = 8; 527 - retval = 1; 528 - } else if (bytes_per_word == 2) { 529 - /* don't burst more than 1/2 the fifo */ 530 - *burst_code = DCMD_BURST16; 531 - burst_bytes = 16; 532 - retval = 1; 533 - } else { 534 - *burst_code = DCMD_BURST32; 535 - burst_bytes = 32; 536 - } 537 - } 538 - 539 - thresh_words = burst_bytes / bytes_per_word; 540 - 541 - /* thresh_words 
will be between 2 and 8 */ 542 - *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) 543 - | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); 544 - 545 - return retval; 546 - } 547 - 548 801 static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) 549 802 { 550 803 unsigned long ssp_clk = drv_data->max_clk_rate; ··· 509 956 cs_deassert(drv_data); 510 957 } 511 958 512 - /* Check for transfers that need multiple DMA segments */ 513 - if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { 959 + /* Check if we can DMA this transfer */ 960 + if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) { 514 961 515 962 /* reject already-mapped transfers; PIO won't always work */ 516 963 if (message->is_dma_mapped ··· 533 980 } 534 981 535 982 /* Setup the transfer state based on the type of transfer */ 536 - if (flush(drv_data) == 0) { 983 + if (pxa2xx_spi_flush(drv_data) == 0) { 537 984 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 538 985 message->status = -EIO; 539 986 giveback(drv_data); 540 987 return; 541 988 } 542 989 drv_data->n_bytes = chip->n_bytes; 543 - drv_data->dma_width = chip->dma_width; 544 990 drv_data->tx = (void *)transfer->tx_buf; 545 991 drv_data->tx_end = drv_data->tx + transfer->len; 546 992 drv_data->rx = transfer->rx_buf; 547 993 drv_data->rx_end = drv_data->rx + transfer->len; 548 994 drv_data->rx_dma = transfer->rx_dma; 549 995 drv_data->tx_dma = transfer->tx_dma; 550 - drv_data->len = transfer->len & DCMD_LENGTH; 996 + drv_data->len = transfer->len; 551 997 drv_data->write = drv_data->tx ? chip->write : null_writer; 552 998 drv_data->read = drv_data->rx ? chip->read : null_reader; 553 999 ··· 567 1015 568 1016 if (bits <= 8) { 569 1017 drv_data->n_bytes = 1; 570 - drv_data->dma_width = DCMD_WIDTH1; 571 1018 drv_data->read = drv_data->read != null_reader ? 572 1019 u8_reader : null_reader; 573 1020 drv_data->write = drv_data->write != null_writer ? 
574 1021 u8_writer : null_writer; 575 1022 } else if (bits <= 16) { 576 1023 drv_data->n_bytes = 2; 577 - drv_data->dma_width = DCMD_WIDTH2; 578 1024 drv_data->read = drv_data->read != null_reader ? 579 1025 u16_reader : null_reader; 580 1026 drv_data->write = drv_data->write != null_writer ? 581 1027 u16_writer : null_writer; 582 1028 } else if (bits <= 32) { 583 1029 drv_data->n_bytes = 4; 584 - drv_data->dma_width = DCMD_WIDTH4; 585 1030 drv_data->read = drv_data->read != null_reader ? 586 1031 u32_reader : null_reader; 587 1032 drv_data->write = drv_data->write != null_writer ? ··· 587 1038 /* if bits/word is changed in dma mode, then must check the 588 1039 * thresholds and burst also */ 589 1040 if (chip->enable_dma) { 590 - if (set_dma_burst_and_threshold(chip, message->spi, 1041 + if (pxa2xx_spi_set_dma_burst_and_threshold(chip, 1042 + message->spi, 591 1043 bits, &dma_burst, 592 1044 &dma_thresh)) 593 1045 if (printk_ratelimit()) ··· 607 1057 608 1058 message->state = RUNNING_STATE; 609 1059 610 - /* Try to map dma buffer and do a dma transfer if successful, but 611 - * only if the length is non-zero and less than MAX_DMA_LEN. 612 - * 613 - * Zero-length non-descriptor DMA is illegal on PXA2xx; force use 614 - * of PIO instead. Care is needed above because the transfer may 615 - * have have been passed with buffers that are already dma mapped. 616 - * A zero-length transfer in PIO mode will not try to write/read 617 - * to/from the buffers 618 - * 619 - * REVISIT large transfers are exactly where we most want to be 620 - * using DMA. If this happens much, split those transfers into 621 - * multiple DMA segments rather than forcing PIO. 
622 - */ 623 1060 drv_data->dma_mapped = 0; 624 - if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) 625 - drv_data->dma_mapped = map_dma_buffers(drv_data); 1061 + if (pxa2xx_spi_dma_is_possible(drv_data->len)) 1062 + drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data); 626 1063 if (drv_data->dma_mapped) { 627 1064 628 1065 /* Ensure we have the correct interrupt handler */ 629 - drv_data->transfer_handler = dma_transfer; 1066 + drv_data->transfer_handler = pxa2xx_spi_dma_transfer; 630 1067 631 - /* Setup rx DMA Channel */ 632 - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 633 - DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; 634 - DTADR(drv_data->rx_channel) = drv_data->rx_dma; 635 - if (drv_data->rx == drv_data->null_dma_buf) 636 - /* No target address increment */ 637 - DCMD(drv_data->rx_channel) = DCMD_FLOWSRC 638 - | drv_data->dma_width 639 - | dma_burst 640 - | drv_data->len; 641 - else 642 - DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR 643 - | DCMD_FLOWSRC 644 - | drv_data->dma_width 645 - | dma_burst 646 - | drv_data->len; 647 - 648 - /* Setup tx DMA Channel */ 649 - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 650 - DSADR(drv_data->tx_channel) = drv_data->tx_dma; 651 - DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; 652 - if (drv_data->tx == drv_data->null_dma_buf) 653 - /* No source address increment */ 654 - DCMD(drv_data->tx_channel) = DCMD_FLOWTRG 655 - | drv_data->dma_width 656 - | dma_burst 657 - | drv_data->len; 658 - else 659 - DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR 660 - | DCMD_FLOWTRG 661 - | drv_data->dma_width 662 - | dma_burst 663 - | drv_data->len; 664 - 665 - /* Enable dma end irqs on SSP to detect end of transfer */ 666 - if (drv_data->ssp_type == PXA25x_SSP) 667 - DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; 1068 + pxa2xx_spi_dma_prepare(drv_data, dma_burst); 668 1069 669 1070 /* Clear status and start DMA engine */ 670 1071 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 671 1072 
write_SSSR(drv_data->clear_sr, reg); 672 - DCSR(drv_data->rx_channel) |= DCSR_RUN; 673 - DCSR(drv_data->tx_channel) |= DCSR_RUN; 1073 + 1074 + pxa2xx_spi_dma_start(drv_data); 674 1075 } else { 675 1076 /* Ensure we have the correct interrupt handler */ 676 1077 drv_data->transfer_handler = interrupt_transfer; ··· 763 1262 chip->gpio_cs = -1; 764 1263 chip->enable_dma = 0; 765 1264 chip->timeout = TIMOUT_DFLT; 766 - chip->dma_burst_size = drv_data->master_info->enable_dma ? 767 - DCMD_BURST8 : 0; 768 1265 } 769 1266 770 1267 /* protocol drivers may change the chip settings, so... ··· 792 1293 * burst and threshold can still respond to changes in bits_per_word */ 793 1294 if (chip->enable_dma) { 794 1295 /* set up legal burst and threshold for dma */ 795 - if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, 1296 + if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, 1297 + spi->bits_per_word, 796 1298 &chip->dma_burst_size, 797 1299 &chip->dma_threshold)) { 798 1300 dev_warn(&spi->dev, "in setup: DMA burst size reduced " ··· 828 1328 829 1329 if (spi->bits_per_word <= 8) { 830 1330 chip->n_bytes = 1; 831 - chip->dma_width = DCMD_WIDTH1; 832 1331 chip->read = u8_reader; 833 1332 chip->write = u8_writer; 834 1333 } else if (spi->bits_per_word <= 16) { 835 1334 chip->n_bytes = 2; 836 - chip->dma_width = DCMD_WIDTH2; 837 1335 chip->read = u16_reader; 838 1336 chip->write = u16_writer; 839 1337 } else if (spi->bits_per_word <= 32) { 840 1338 chip->cr0 |= SSCR0_EDSS; 841 1339 chip->n_bytes = 4; 842 - chip->dma_width = DCMD_WIDTH4; 843 1340 chip->read = u32_reader; 844 1341 chip->write = u32_writer; 845 1342 } else { ··· 944 1447 drv_data->tx_channel = -1; 945 1448 drv_data->rx_channel = -1; 946 1449 if (platform_info->enable_dma) { 947 - 948 - /* Get two DMA channels (rx and tx) */ 949 - drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", 950 - DMA_PRIO_HIGH, 951 - dma_handler, 952 - drv_data); 953 - if (drv_data->rx_channel < 0) { 954 - dev_err(dev, 
"problem (%d) requesting rx channel\n", 955 - drv_data->rx_channel); 956 - status = -ENODEV; 957 - goto out_error_irq_alloc; 1450 + status = pxa2xx_spi_dma_setup(drv_data); 1451 + if (status) { 1452 + dev_warn(dev, "failed to setup DMA, using PIO\n"); 1453 + platform_info->enable_dma = false; 958 1454 } 959 - drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", 960 - DMA_PRIO_MEDIUM, 961 - dma_handler, 962 - drv_data); 963 - if (drv_data->tx_channel < 0) { 964 - dev_err(dev, "problem (%d) requesting tx channel\n", 965 - drv_data->tx_channel); 966 - status = -ENODEV; 967 - goto out_error_dma_alloc; 968 - } 969 - 970 - DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; 971 - DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; 972 1455 } 973 1456 974 1457 /* Enable SOC clock */ ··· 984 1507 985 1508 out_error_clock_enabled: 986 1509 clk_disable_unprepare(ssp->clk); 987 - 988 - out_error_dma_alloc: 989 - if (drv_data->tx_channel != -1) 990 - pxa_free_dma(drv_data->tx_channel); 991 - if (drv_data->rx_channel != -1) 992 - pxa_free_dma(drv_data->rx_channel); 993 - 994 - out_error_irq_alloc: 1510 + pxa2xx_spi_dma_release(drv_data); 995 1511 free_irq(ssp->irq, drv_data); 996 1512 997 1513 out_error_master_alloc: ··· 1007 1537 clk_disable_unprepare(ssp->clk); 1008 1538 1009 1539 /* Release DMA */ 1010 - if (drv_data->master_info->enable_dma) { 1011 - DRCMR(ssp->drcmr_rx) = 0; 1012 - DRCMR(ssp->drcmr_tx) = 0; 1013 - pxa_free_dma(drv_data->tx_channel); 1014 - pxa_free_dma(drv_data->rx_channel); 1015 - } 1540 + if (drv_data->master_info->enable_dma) 1541 + pxa2xx_spi_dma_release(drv_data); 1016 1542 1017 1543 /* Release IRQ */ 1018 1544 free_irq(ssp->irq, drv_data); ··· 1055 1589 struct ssp_device *ssp = drv_data->ssp; 1056 1590 int status = 0; 1057 1591 1058 - if (drv_data->rx_channel != -1) 1059 - DRCMR(drv_data->ssp->drcmr_rx) = 1060 - DRCMR_MAPVLD | drv_data->rx_channel; 1061 - if (drv_data->tx_channel != -1) 1062 - DRCMR(drv_data->ssp->drcmr_tx) = 
1063 - DRCMR_MAPVLD | drv_data->tx_channel; 1592 + pxa2xx_spi_dma_resume(drv_data); 1064 1593 1065 1594 /* Enable the SSP clock */ 1066 1595 clk_prepare_enable(ssp->clk);
+185
drivers/spi/spi-pxa2xx.h
··· 1 + /* 2 + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 3 + * Copyright (C) 2013, Intel Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #ifndef SPI_PXA2XX_H 11 + #define SPI_PXA2XX_H 12 + 13 + #include <linux/errno.h> 14 + #include <linux/io.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/pxa2xx_ssp.h> 18 + #include <linux/spi/spi.h> 19 + #include <linux/spi/pxa2xx_spi.h> 20 + 21 + struct driver_data { 22 + /* Driver model hookup */ 23 + struct platform_device *pdev; 24 + 25 + /* SSP Info */ 26 + struct ssp_device *ssp; 27 + 28 + /* SPI framework hookup */ 29 + enum pxa_ssp_type ssp_type; 30 + struct spi_master *master; 31 + 32 + /* PXA hookup */ 33 + struct pxa2xx_spi_master *master_info; 34 + 35 + /* PXA private DMA setup stuff */ 36 + int rx_channel; 37 + int tx_channel; 38 + u32 *null_dma_buf; 39 + 40 + /* SSP register addresses */ 41 + void __iomem *ioaddr; 42 + u32 ssdr_physical; 43 + 44 + /* SSP masks*/ 45 + u32 dma_cr1; 46 + u32 int_cr1; 47 + u32 clear_sr; 48 + u32 mask_sr; 49 + 50 + /* Maximun clock rate */ 51 + unsigned long max_clk_rate; 52 + 53 + /* Message Transfer pump */ 54 + struct tasklet_struct pump_transfers; 55 + 56 + /* Current message transfer state info */ 57 + struct spi_message *cur_msg; 58 + struct spi_transfer *cur_transfer; 59 + struct chip_data *cur_chip; 60 + size_t len; 61 + void *tx; 62 + void *tx_end; 63 + void *rx; 64 + void *rx_end; 65 + int dma_mapped; 66 + dma_addr_t rx_dma; 67 + dma_addr_t tx_dma; 68 + size_t rx_map_len; 69 + size_t tx_map_len; 70 + u8 n_bytes; 71 + int (*write)(struct driver_data *drv_data); 72 + int (*read)(struct driver_data *drv_data); 73 + irqreturn_t (*transfer_handler)(struct driver_data *drv_data); 74 + void (*cs_control)(u32 command); 75 + }; 76 + 77 + 
struct chip_data { 78 + u32 cr0; 79 + u32 cr1; 80 + u32 psp; 81 + u32 timeout; 82 + u8 n_bytes; 83 + u32 dma_burst_size; 84 + u32 threshold; 85 + u32 dma_threshold; 86 + u8 enable_dma; 87 + u8 bits_per_word; 88 + u32 speed_hz; 89 + union { 90 + int gpio_cs; 91 + unsigned int frm; 92 + }; 93 + int gpio_cs_inverted; 94 + int (*write)(struct driver_data *drv_data); 95 + int (*read)(struct driver_data *drv_data); 96 + void (*cs_control)(u32 command); 97 + }; 98 + 99 + #define DEFINE_SSP_REG(reg, off) \ 100 + static inline u32 read_##reg(void const __iomem *p) \ 101 + { return __raw_readl(p + (off)); } \ 102 + \ 103 + static inline void write_##reg(u32 v, void __iomem *p) \ 104 + { __raw_writel(v, p + (off)); } 105 + 106 + DEFINE_SSP_REG(SSCR0, 0x00) 107 + DEFINE_SSP_REG(SSCR1, 0x04) 108 + DEFINE_SSP_REG(SSSR, 0x08) 109 + DEFINE_SSP_REG(SSITR, 0x0c) 110 + DEFINE_SSP_REG(SSDR, 0x10) 111 + DEFINE_SSP_REG(SSTO, 0x28) 112 + DEFINE_SSP_REG(SSPSP, 0x2c) 113 + 114 + #define START_STATE ((void *)0) 115 + #define RUNNING_STATE ((void *)1) 116 + #define DONE_STATE ((void *)2) 117 + #define ERROR_STATE ((void *)-1) 118 + 119 + #define MAX_DMA_LEN 8191 120 + #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) 121 + #define DMA_ALIGNMENT 8 122 + 123 + static inline int pxa25x_ssp_comp(struct driver_data *drv_data) 124 + { 125 + if (drv_data->ssp_type == PXA25x_SSP) 126 + return 1; 127 + if (drv_data->ssp_type == CE4100_SSP) 128 + return 1; 129 + return 0; 130 + } 131 + 132 + static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) 133 + { 134 + void __iomem *reg = drv_data->ioaddr; 135 + 136 + if (drv_data->ssp_type == CE4100_SSP) 137 + val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 138 + 139 + write_SSSR(val, reg); 140 + } 141 + 142 + extern int pxa2xx_spi_flush(struct driver_data *drv_data); 143 + extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data); 144 + 145 + #if defined(CONFIG_SPI_PXA2XX_PXADMA) 146 + extern bool 
pxa2xx_spi_dma_is_possible(size_t len); 147 + extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data); 148 + extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data); 149 + extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst); 150 + extern void pxa2xx_spi_dma_start(struct driver_data *drv_data); 151 + extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); 152 + extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); 153 + extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data); 154 + extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, 155 + struct spi_device *spi, 156 + u8 bits_per_word, 157 + u32 *burst_code, 158 + u32 *threshold); 159 + #else 160 + static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; } 161 + static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) 162 + { 163 + return 0; 164 + } 165 + #define pxa2xx_spi_dma_transfer NULL 166 + static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data, 167 + u32 dma_burst) {} 168 + static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {} 169 + static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data) 170 + { 171 + return 0; 172 + } 173 + static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {} 174 + static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {} 175 + static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, 176 + struct spi_device *spi, 177 + u8 bits_per_word, 178 + u32 *burst_code, 179 + u32 *threshold) 180 + { 181 + return -ENODEV; 182 + } 183 + #endif 184 + 185 + #endif /* SPI_PXA2XX_H */
-80
include/linux/spi/pxa2xx_spi.h
··· 53 53 54 54 extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 55 55 56 - #else 57 - /* 58 - * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or 59 - * plat/ include path. 60 - * The CE4100 does not provide DMA support. This bits are here to let the driver 61 - * compile and will never be used. Maybe we get DMA support at a later point in 62 - * time. 63 - */ 64 - 65 - #define DCSR(n) (n) 66 - #define DSADR(n) (n) 67 - #define DTADR(n) (n) 68 - #define DCMD(n) (n) 69 - #define DRCMR(n) (n) 70 - 71 - #define DCSR_RUN (1 << 31) /* Run Bit */ 72 - #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */ 73 - #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */ 74 - #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ 75 - #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ 76 - #define DCSR_ENDINTR (1 << 2) /* End Interrupt */ 77 - #define DCSR_STARTINTR (1 << 1) /* Start Interrupt */ 78 - #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */ 79 - 80 - #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */ 81 - #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ 82 - #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ 83 - #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ 84 - #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ 85 - #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ 86 - #define DCSR_EORINTR (1 << 9) /* The end of Receive */ 87 - 88 - #define DRCMR_MAPVLD (1 << 7) /* Map Valid */ 89 - #define DRCMR_CHLNUM 0x1f /* mask for Channel Number */ 90 - 91 - #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */ 92 - #define DDADR_STOP (1 << 0) /* Stop */ 93 - 94 - #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ 95 - #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ 96 - #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. 
*/ 97 - #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ 98 - #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ 99 - #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ 100 - #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ 101 - #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ 102 - #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ 103 - #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ 104 - #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ 105 - #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ 106 - #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ 107 - #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ 108 - 109 - /* 110 - * Descriptor structure for PXA's DMA engine 111 - * Note: this structure must always be aligned to a 16-byte boundary. 112 - */ 113 - 114 - typedef enum { 115 - DMA_PRIO_HIGH = 0, 116 - DMA_PRIO_MEDIUM = 1, 117 - DMA_PRIO_LOW = 2 118 - } pxa_dma_prio; 119 - 120 - /* 121 - * DMA registration 122 - */ 123 - 124 - static inline int pxa_request_dma(char *name, 125 - pxa_dma_prio prio, 126 - void (*irq_handler)(int, void *), 127 - void *data) 128 - { 129 - return -ENODEV; 130 - } 131 - 132 - static inline void pxa_free_dma(int dma_ch) 133 - { 134 - } 135 - 136 56 #endif 137 57 #endif