Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'spi-next' from git://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc.git

Pull in the changes Mark has queued up for SPI

+2256 -77
+26
Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
··· 1 + NVIDIA Tegra20 SFLASH controller. 2 + 3 + Required properties: 4 + - compatible : should be "nvidia,tegra20-sflash". 5 + - reg: Should contain SFLASH registers location and length. 6 + - interrupts: Should contain SFLASH interrupts. 7 + - nvidia,dma-request-selector : The Tegra DMA controller's phandle and 8 + request selector for this SFLASH controller. 9 + 10 + Recommended properties: 11 + - spi-max-frequency: Definition as per 12 + Documentation/devicetree/bindings/spi/spi-bus.txt 13 + 14 + Example: 15 + 16 + spi@7000c380 { 17 + compatible = "nvidia,tegra20-sflash"; 18 + reg = <0x7000c380 0x80>; 19 + interrupts = <0 39 0x04>; 20 + nvidia,dma-request-selector = <&apbdma 16>; 21 + spi-max-frequency = <25000000>; 22 + #address-cells = <1>; 23 + #size-cells = <0>; 24 + status = "disabled"; 25 + }; 26 +
+26
Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
··· 1 + NVIDIA Tegra20/Tegra30 SLINK controller. 2 + 3 + Required properties: 4 + - compatible : should be "nvidia,tegra20-slink", "nvidia,tegra30-slink". 5 + - reg: Should contain SLINK registers location and length. 6 + - interrupts: Should contain SLINK interrupts. 7 + - nvidia,dma-request-selector : The Tegra DMA controller's phandle and 8 + request selector for this SLINK controller. 9 + 10 + Recommended properties: 11 + - spi-max-frequency: Definition as per 12 + Documentation/devicetree/bindings/spi/spi-bus.txt 13 + 14 + Example: 15 + 16 + slink@7000d600 { 17 + compatible = "nvidia,tegra20-slink"; 18 + reg = <0x7000d600 0x200>; 19 + interrupts = <0 82 0x04>; 20 + nvidia,dma-request-selector = <&apbdma 16>; 21 + spi-max-frequency = <25000000>; 22 + #address-cells = <1>; 23 + #size-cells = <0>; 24 + status = "disabled"; 25 + }; 26 +
+3 -1
Documentation/devicetree/bindings/spi/omap-spi.txt
··· 6 6 - "ti,omap4-spi" for OMAP4+. 7 7 - ti,spi-num-cs : Number of chipselect supported by the instance. 8 8 - ti,hwmods: Name of the hwmod associated to the McSPI 9 - 9 + - ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as 10 + input. The default is D0 as input and 11 + D1 as output. 10 12 11 13 Example: 12 14
+14
drivers/spi/Kconfig
··· 392 392 help 393 393 SPI driver for Freescale MXS devices. 394 394 395 + config SPI_TEGRA20_SFLASH 396 + tristate "Nvidia Tegra20 Serial flash Controller" 397 + depends on ARCH_TEGRA 398 + help 399 + SPI driver for Nvidia Tegra20 Serial flash Controller interface. 400 + The main usecase of this controller is to use spi flash as boot 401 + device. 402 + 403 + config SPI_TEGRA20_SLINK 404 + tristate "Nvidia Tegra20/Tegra30 SLINK Controller" 405 + depends on ARCH_TEGRA && TEGRA20_APB_DMA 406 + help 407 + SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface. 408 + 395 409 config SPI_TI_SSP 396 410 tristate "TI Sequencer Serial Port - SPI Support" 397 411 depends on MFD_TI_SSP
+2 -1
drivers/spi/Makefile
··· 61 61 obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o 62 62 obj-$(CONFIG_SPI_SIRF) += spi-sirf.o 63 63 obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o 64 + obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o 65 + obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o 64 66 obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o 65 67 obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o 66 68 obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o 67 69 obj-$(CONFIG_SPI_TXX9) += spi-txx9.o 68 70 obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o 69 71 obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o 70 -
+6 -10
drivers/spi/spi-bcm63xx.c
··· 36 36 #include <bcm63xx_dev_spi.h> 37 37 38 38 #define PFX KBUILD_MODNAME 39 - #define DRV_VER "0.1.2" 40 39 41 40 struct bcm63xx_spi { 42 41 struct completion done; ··· 167 168 dev_err(&spi->dev, "%s, unsupported mode bits %x\n", 168 169 __func__, spi->mode & ~MODEBITS); 169 170 return -EINVAL; 170 - } 171 - 172 - ret = bcm63xx_spi_check_transfer(spi, NULL); 173 - if (ret < 0) { 174 - dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 175 - spi->mode & ~MODEBITS); 176 - return ret; 177 171 } 178 172 179 173 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", ··· 433 441 goto out_clk_disable; 434 442 } 435 443 436 - dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n", 437 - r->start, irq, bs->fifo_size, DRV_VER); 444 + dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", 445 + r->start, irq, bs->fifo_size); 438 446 439 447 return 0; 440 448 ··· 477 485 platform_get_drvdata(to_platform_device(dev)); 478 486 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 479 487 488 + spi_master_suspend(master); 489 + 480 490 clk_disable(bs->clk); 481 491 482 492 return 0; ··· 491 497 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 492 498 493 499 clk_enable(bs->clk); 500 + 501 + spi_master_resume(master); 494 502 495 503 return 0; 496 504 }
+36 -29
drivers/spi/spi-omap2-mcspi.c
··· 129 129 struct omap2_mcspi_dma *dma_channels; 130 130 struct device *dev; 131 131 struct omap2_mcspi_regs ctx; 132 + unsigned int pin_dir:1; 132 133 }; 133 134 134 135 struct omap2_mcspi_cs { ··· 323 322 struct omap2_mcspi *mcspi; 324 323 struct omap2_mcspi_dma *mcspi_dma; 325 324 unsigned int count; 326 - u8 * rx; 327 - const u8 * tx; 328 - void __iomem *chstat_reg; 329 - struct omap2_mcspi_cs *cs = spi->controller_state; 330 325 331 326 mcspi = spi_master_get_devdata(spi->master); 332 327 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 333 328 count = xfer->len; 334 - 335 - rx = xfer->rx_buf; 336 - tx = xfer->tx_buf; 337 - chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; 338 329 339 330 if (mcspi_dma->dma_tx) { 340 331 struct dma_async_tx_descriptor *tx; ··· 351 358 dma_async_issue_pending(mcspi_dma->dma_tx); 352 359 omap2_mcspi_set_dma_req(spi, 0, 1); 353 360 354 - wait_for_completion(&mcspi_dma->dma_tx_completion); 355 - dma_unmap_single(mcspi->dev, xfer->tx_dma, count, 356 - DMA_TO_DEVICE); 357 - 358 - /* for TX_ONLY mode, be sure all words have shifted out */ 359 - if (rx == NULL) { 360 - if (mcspi_wait_for_reg_bit(chstat_reg, 361 - OMAP2_MCSPI_CHSTAT_TXS) < 0) 362 - dev_err(&spi->dev, "TXS timed out\n"); 363 - else if (mcspi_wait_for_reg_bit(chstat_reg, 364 - OMAP2_MCSPI_CHSTAT_EOT) < 0) 365 - dev_err(&spi->dev, "EOT timed out\n"); 366 - } 367 361 } 368 362 369 363 static unsigned ··· 471 491 struct dma_slave_config cfg; 472 492 enum dma_slave_buswidth width; 473 493 unsigned es; 494 + void __iomem *chstat_reg; 474 495 475 496 mcspi = spi_master_get_devdata(spi->master); 476 497 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; ··· 506 525 omap2_mcspi_tx_dma(spi, xfer, cfg); 507 526 508 527 if (rx != NULL) 509 - return omap2_mcspi_rx_dma(spi, xfer, cfg, es); 528 + count = omap2_mcspi_rx_dma(spi, xfer, cfg, es); 510 529 530 + if (tx != NULL) { 531 + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; 532 + wait_for_completion(&mcspi_dma->dma_tx_completion); 
533 + dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len, 534 + DMA_TO_DEVICE); 535 + 536 + /* for TX_ONLY mode, be sure all words have shifted out */ 537 + if (rx == NULL) { 538 + if (mcspi_wait_for_reg_bit(chstat_reg, 539 + OMAP2_MCSPI_CHSTAT_TXS) < 0) 540 + dev_err(&spi->dev, "TXS timed out\n"); 541 + else if (mcspi_wait_for_reg_bit(chstat_reg, 542 + OMAP2_MCSPI_CHSTAT_EOT) < 0) 543 + dev_err(&spi->dev, "EOT timed out\n"); 544 + } 545 + } 511 546 return count; 512 547 } 513 548 ··· 761 764 /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS 762 765 * REVISIT: this controller could support SPI_3WIRE mode. 763 766 */ 764 - l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1); 765 - l |= OMAP2_MCSPI_CHCONF_DPE0; 767 + if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) { 768 + l &= ~OMAP2_MCSPI_CHCONF_IS; 769 + l &= ~OMAP2_MCSPI_CHCONF_DPE1; 770 + l |= OMAP2_MCSPI_CHCONF_DPE0; 771 + } else { 772 + l |= OMAP2_MCSPI_CHCONF_IS; 773 + l |= OMAP2_MCSPI_CHCONF_DPE1; 774 + l &= ~OMAP2_MCSPI_CHCONF_DPE0; 775 + } 766 776 767 777 /* wordlength */ 768 778 l &= ~OMAP2_MCSPI_CHCONF_WL_MASK; ··· 1170 1166 master->cleanup = omap2_mcspi_cleanup; 1171 1167 master->dev.of_node = node; 1172 1168 1169 + dev_set_drvdata(&pdev->dev, master); 1170 + 1171 + mcspi = spi_master_get_devdata(master); 1172 + mcspi->master = master; 1173 + 1173 1174 match = of_match_device(omap_mcspi_of_match, &pdev->dev); 1174 1175 if (match) { 1175 1176 u32 num_cs = 1; /* default number of chipselect */ ··· 1183 1174 of_property_read_u32(node, "ti,spi-num-cs", &num_cs); 1184 1175 master->num_chipselect = num_cs; 1185 1176 master->bus_num = bus_num++; 1177 + if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL)) 1178 + mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN; 1186 1179 } else { 1187 1180 pdata = pdev->dev.platform_data; 1188 1181 master->num_chipselect = pdata->num_cs; 1189 1182 if (pdev->id != -1) 1190 1183 master->bus_num = pdev->id; 1184 + mcspi->pin_dir = pdata->pin_dir; 1191 1185 } 1192 
1186 regs_offset = pdata->regs_offset; 1193 - 1194 - dev_set_drvdata(&pdev->dev, master); 1195 - 1196 - mcspi = spi_master_get_devdata(master); 1197 - mcspi->master = master; 1198 1187 1199 1188 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1200 1189 if (r == NULL) {
+36 -16
drivers/spi/spi-pl022.c
··· 371 371 /* Two optional pin states - default & sleep */ 372 372 struct pinctrl *pinctrl; 373 373 struct pinctrl_state *pins_default; 374 + struct pinctrl_state *pins_idle; 374 375 struct pinctrl_state *pins_sleep; 375 376 struct spi_master *master; 376 377 struct pl022_ssp_controller *master_info; ··· 2117 2116 } else 2118 2117 dev_err(dev, "could not get default pinstate\n"); 2119 2118 2119 + pl022->pins_idle = pinctrl_lookup_state(pl022->pinctrl, 2120 + PINCTRL_STATE_IDLE); 2121 + if (IS_ERR(pl022->pins_idle)) 2122 + dev_dbg(dev, "could not get idle pinstate\n"); 2123 + 2120 2124 pl022->pins_sleep = pinctrl_lookup_state(pl022->pinctrl, 2121 2125 PINCTRL_STATE_SLEEP); 2122 2126 if (IS_ERR(pl022->pins_sleep)) ··· 2252 2246 pm_runtime_set_autosuspend_delay(dev, 2253 2247 platform_info->autosuspend_delay); 2254 2248 pm_runtime_use_autosuspend(dev); 2255 - pm_runtime_put_autosuspend(dev); 2256 - } else { 2257 - pm_runtime_put(dev); 2258 2249 } 2250 + pm_runtime_put(dev); 2251 + 2259 2252 return 0; 2260 2253 2261 2254 err_spi_register: ··· 2308 2303 * the runtime counterparts to handle external resources like 2309 2304 * clocks, pins and regulators when going to sleep. 2310 2305 */ 2311 - static void pl022_suspend_resources(struct pl022 *pl022) 2306 + static void pl022_suspend_resources(struct pl022 *pl022, bool runtime) 2312 2307 { 2313 2308 int ret; 2309 + struct pinctrl_state *pins_state; 2314 2310 2315 2311 clk_disable(pl022->clk); 2316 2312 2313 + pins_state = runtime ? pl022->pins_idle : pl022->pins_sleep; 2317 2314 /* Optionally let pins go into sleep states */ 2318 - if (!IS_ERR(pl022->pins_sleep)) { 2319 - ret = pinctrl_select_state(pl022->pinctrl, 2320 - pl022->pins_sleep); 2315 + if (!IS_ERR(pins_state)) { 2316 + ret = pinctrl_select_state(pl022->pinctrl, pins_state); 2321 2317 if (ret) 2322 - dev_err(&pl022->adev->dev, 2323 - "could not set pins to sleep state\n"); 2318 + dev_err(&pl022->adev->dev, "could not set %s pins\n", 2319 + runtime ? 
"idle" : "sleep"); 2324 2320 } 2325 2321 } 2326 2322 2327 - static void pl022_resume_resources(struct pl022 *pl022) 2323 + static void pl022_resume_resources(struct pl022 *pl022, bool runtime) 2328 2324 { 2329 2325 int ret; 2330 2326 2331 2327 /* Optionaly enable pins to be muxed in and configured */ 2328 + /* First go to the default state */ 2332 2329 if (!IS_ERR(pl022->pins_default)) { 2333 - ret = pinctrl_select_state(pl022->pinctrl, 2334 - pl022->pins_default); 2330 + ret = pinctrl_select_state(pl022->pinctrl, pl022->pins_default); 2335 2331 if (ret) 2336 2332 dev_err(&pl022->adev->dev, 2337 2333 "could not set default pins\n"); 2334 + } 2335 + 2336 + if (!runtime) { 2337 + /* Then let's idle the pins until the next transfer happens */ 2338 + if (!IS_ERR(pl022->pins_idle)) { 2339 + ret = pinctrl_select_state(pl022->pinctrl, 2340 + pl022->pins_idle); 2341 + if (ret) 2342 + dev_err(&pl022->adev->dev, 2343 + "could not set idle pins\n"); 2344 + } 2338 2345 } 2339 2346 2340 2347 clk_enable(pl022->clk); ··· 2364 2347 dev_warn(dev, "cannot suspend master\n"); 2365 2348 return ret; 2366 2349 } 2367 - pl022_suspend_resources(pl022); 2350 + 2351 + pm_runtime_get_sync(dev); 2352 + pl022_suspend_resources(pl022, false); 2368 2353 2369 2354 dev_dbg(dev, "suspended\n"); 2370 2355 return 0; ··· 2377 2358 struct pl022 *pl022 = dev_get_drvdata(dev); 2378 2359 int ret; 2379 2360 2380 - pl022_resume_resources(pl022); 2361 + pl022_resume_resources(pl022, false); 2362 + pm_runtime_put(dev); 2381 2363 2382 2364 /* Start the queue running */ 2383 2365 ret = spi_master_resume(pl022->master); ··· 2396 2376 { 2397 2377 struct pl022 *pl022 = dev_get_drvdata(dev); 2398 2378 2399 - pl022_suspend_resources(pl022); 2379 + pl022_suspend_resources(pl022, true); 2400 2380 return 0; 2401 2381 } 2402 2382 ··· 2404 2384 { 2405 2385 struct pl022 *pl022 = dev_get_drvdata(dev); 2406 2386 2407 - pl022_resume_resources(pl022); 2387 + pl022_resume_resources(pl022, true); 2408 2388 return 0; 2409 2389 } 
2410 2390 #endif
+17 -17
drivers/spi/spi-s3c64xx.c
··· 516 516 517 517 /* Disable Clock */ 518 518 if (sdd->port_conf->clk_from_cmu) { 519 - clk_disable(sdd->src_clk); 519 + clk_disable_unprepare(sdd->src_clk); 520 520 } else { 521 521 val = readl(regs + S3C64XX_SPI_CLK_CFG); 522 522 val &= ~S3C64XX_SPI_ENCLK_ENABLE; ··· 564 564 /* There is half-multiplier before the SPI */ 565 565 clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); 566 566 /* Enable Clock */ 567 - clk_enable(sdd->src_clk); 567 + clk_prepare_enable(sdd->src_clk); 568 568 } else { 569 569 /* Configure Clock */ 570 570 val = readl(regs + S3C64XX_SPI_CLK_CFG); ··· 1112 1112 dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio); 1113 1113 goto free_gpio; 1114 1114 } 1115 - 1115 + sdd->gpios[idx] = gpio; 1116 1116 ret = gpio_request(gpio, "spi-bus"); 1117 1117 if (ret) { 1118 1118 dev_err(dev, "gpio [%d] request failed: %d\n", ··· 1302 1302 goto err3; 1303 1303 } 1304 1304 1305 - if (clk_enable(sdd->clk)) { 1305 + if (clk_prepare_enable(sdd->clk)) { 1306 1306 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); 1307 1307 ret = -EBUSY; 1308 1308 goto err4; ··· 1317 1317 goto err5; 1318 1318 } 1319 1319 1320 - if (clk_enable(sdd->src_clk)) { 1320 + if (clk_prepare_enable(sdd->src_clk)) { 1321 1321 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); 1322 1322 ret = -EBUSY; 1323 1323 goto err6; ··· 1361 1361 err8: 1362 1362 free_irq(irq, sdd); 1363 1363 err7: 1364 - clk_disable(sdd->src_clk); 1364 + clk_disable_unprepare(sdd->src_clk); 1365 1365 err6: 1366 1366 clk_put(sdd->src_clk); 1367 1367 err5: 1368 - clk_disable(sdd->clk); 1368 + clk_disable_unprepare(sdd->clk); 1369 1369 err4: 1370 1370 clk_put(sdd->clk); 1371 1371 err3: ··· 1393 1393 1394 1394 free_irq(platform_get_irq(pdev, 0), sdd); 1395 1395 1396 - clk_disable(sdd->src_clk); 1396 + clk_disable_unprepare(sdd->src_clk); 1397 1397 clk_put(sdd->src_clk); 1398 1398 1399 - clk_disable(sdd->clk); 1399 + clk_disable_unprepare(sdd->clk); 1400 1400 clk_put(sdd->clk); 1401 1401 1402 1402 if 
(!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node) ··· 1417 1417 spi_master_suspend(master); 1418 1418 1419 1419 /* Disable the clock */ 1420 - clk_disable(sdd->src_clk); 1421 - clk_disable(sdd->clk); 1420 + clk_disable_unprepare(sdd->src_clk); 1421 + clk_disable_unprepare(sdd->clk); 1422 1422 1423 1423 if (!sdd->cntrlr_info->cfg_gpio && dev->of_node) 1424 1424 s3c64xx_spi_dt_gpio_free(sdd); ··· 1440 1440 sci->cfg_gpio(); 1441 1441 1442 1442 /* Enable the clock */ 1443 - clk_enable(sdd->src_clk); 1444 - clk_enable(sdd->clk); 1443 + clk_prepare_enable(sdd->src_clk); 1444 + clk_prepare_enable(sdd->clk); 1445 1445 1446 1446 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1447 1447 ··· 1457 1457 struct spi_master *master = dev_get_drvdata(dev); 1458 1458 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1459 1459 1460 - clk_disable(sdd->clk); 1461 - clk_disable(sdd->src_clk); 1460 + clk_disable_unprepare(sdd->clk); 1461 + clk_disable_unprepare(sdd->src_clk); 1462 1462 1463 1463 return 0; 1464 1464 } ··· 1468 1468 struct spi_master *master = dev_get_drvdata(dev); 1469 1469 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1470 1470 1471 - clk_enable(sdd->src_clk); 1472 - clk_enable(sdd->clk); 1471 + clk_prepare_enable(sdd->src_clk); 1472 + clk_prepare_enable(sdd->clk); 1473 1473 1474 1474 return 0; 1475 1475 }
+665
drivers/spi/spi-tegra20-sflash.c
··· 1 + /* 2 + * SPI driver for Nvidia's Tegra20 Serial Flash Controller. 3 + * 4 + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 + * 6 + * Author: Laxman Dewangan <ldewangan@nvidia.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 + */ 20 + 21 + #include <linux/clk.h> 22 + #include <linux/completion.h> 23 + #include <linux/delay.h> 24 + #include <linux/err.h> 25 + #include <linux/init.h> 26 + #include <linux/interrupt.h> 27 + #include <linux/io.h> 28 + #include <linux/kernel.h> 29 + #include <linux/kthread.h> 30 + #include <linux/module.h> 31 + #include <linux/platform_device.h> 32 + #include <linux/pm_runtime.h> 33 + #include <linux/of.h> 34 + #include <linux/of_device.h> 35 + #include <linux/spi/spi.h> 36 + #include <linux/spi/spi-tegra.h> 37 + #include <mach/clk.h> 38 + 39 + #define SPI_COMMAND 0x000 40 + #define SPI_GO BIT(30) 41 + #define SPI_M_S BIT(28) 42 + #define SPI_ACTIVE_SCLK_MASK (0x3 << 26) 43 + #define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26) 44 + #define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26) 45 + #define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26) 46 + #define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26) 47 + 48 + #define SPI_CK_SDA_FALLING (1 << 21) 49 + #define SPI_CK_SDA_RISING (0 << 21) 50 + #define SPI_CK_SDA_MASK (1 << 21) 51 + #define SPI_ACTIVE_SDA (0x3 << 18) 52 + #define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18) 53 + #define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18) 54 + 
#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18) 55 + #define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18) 56 + 57 + #define SPI_CS_POL_INVERT BIT(16) 58 + #define SPI_TX_EN BIT(15) 59 + #define SPI_RX_EN BIT(14) 60 + #define SPI_CS_VAL_HIGH BIT(13) 61 + #define SPI_CS_VAL_LOW 0x0 62 + #define SPI_CS_SW BIT(12) 63 + #define SPI_CS_HW 0x0 64 + #define SPI_CS_DELAY_MASK (7 << 9) 65 + #define SPI_CS3_EN BIT(8) 66 + #define SPI_CS2_EN BIT(7) 67 + #define SPI_CS1_EN BIT(6) 68 + #define SPI_CS0_EN BIT(5) 69 + 70 + #define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \ 71 + SPI_CS1_EN | SPI_CS0_EN) 72 + #define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) 73 + 74 + #define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK) 75 + 76 + #define SPI_STATUS 0x004 77 + #define SPI_BSY BIT(31) 78 + #define SPI_RDY BIT(30) 79 + #define SPI_TXF_FLUSH BIT(29) 80 + #define SPI_RXF_FLUSH BIT(28) 81 + #define SPI_RX_UNF BIT(27) 82 + #define SPI_TX_OVF BIT(26) 83 + #define SPI_RXF_EMPTY BIT(25) 84 + #define SPI_RXF_FULL BIT(24) 85 + #define SPI_TXF_EMPTY BIT(23) 86 + #define SPI_TXF_FULL BIT(22) 87 + #define SPI_BLK_CNT(count) (((count) & 0xffff) + 1) 88 + 89 + #define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF) 90 + #define SPI_FIFO_EMPTY (SPI_TX_EMPTY | SPI_RX_EMPTY) 91 + 92 + #define SPI_RX_CMP 0x8 93 + #define SPI_DMA_CTL 0x0C 94 + #define SPI_DMA_EN BIT(31) 95 + #define SPI_IE_RXC BIT(27) 96 + #define SPI_IE_TXC BIT(26) 97 + #define SPI_PACKED BIT(20) 98 + #define SPI_RX_TRIG_MASK (0x3 << 18) 99 + #define SPI_RX_TRIG_1W (0x0 << 18) 100 + #define SPI_RX_TRIG_4W (0x1 << 18) 101 + #define SPI_TX_TRIG_MASK (0x3 << 16) 102 + #define SPI_TX_TRIG_1W (0x0 << 16) 103 + #define SPI_TX_TRIG_4W (0x1 << 16) 104 + #define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF); 105 + 106 + #define SPI_TX_FIFO 0x10 107 + #define SPI_RX_FIFO 0x20 108 + 109 + #define DATA_DIR_TX (1 << 0) 110 + #define DATA_DIR_RX (1 << 1) 111 + 112 + #define MAX_CHIP_SELECT 4 113 + #define SPI_FIFO_DEPTH 4 114 + #define SPI_DMA_TIMEOUT 
(msecs_to_jiffies(1000)) 115 + 116 + struct tegra_sflash_data { 117 + struct device *dev; 118 + struct spi_master *master; 119 + spinlock_t lock; 120 + 121 + struct clk *clk; 122 + void __iomem *base; 123 + unsigned irq; 124 + u32 spi_max_frequency; 125 + u32 cur_speed; 126 + 127 + struct spi_device *cur_spi; 128 + unsigned cur_pos; 129 + unsigned cur_len; 130 + unsigned bytes_per_word; 131 + unsigned cur_direction; 132 + unsigned curr_xfer_words; 133 + 134 + unsigned cur_rx_pos; 135 + unsigned cur_tx_pos; 136 + 137 + u32 tx_status; 138 + u32 rx_status; 139 + u32 status_reg; 140 + 141 + u32 def_command_reg; 142 + u32 command_reg; 143 + u32 dma_control_reg; 144 + 145 + struct completion xfer_completion; 146 + struct spi_transfer *curr_xfer; 147 + }; 148 + 149 + static int tegra_sflash_runtime_suspend(struct device *dev); 150 + static int tegra_sflash_runtime_resume(struct device *dev); 151 + 152 + static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd, 153 + unsigned long reg) 154 + { 155 + return readl(tsd->base + reg); 156 + } 157 + 158 + static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd, 159 + unsigned long val, unsigned long reg) 160 + { 161 + writel(val, tsd->base + reg); 162 + } 163 + 164 + static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd) 165 + { 166 + /* Write 1 to clear status register */ 167 + tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS); 168 + } 169 + 170 + static unsigned tegra_sflash_calculate_curr_xfer_param( 171 + struct spi_device *spi, struct tegra_sflash_data *tsd, 172 + struct spi_transfer *t) 173 + { 174 + unsigned remain_len = t->len - tsd->cur_pos; 175 + unsigned max_word; 176 + 177 + tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1; 178 + max_word = remain_len / tsd->bytes_per_word; 179 + if (max_word > SPI_FIFO_DEPTH) 180 + max_word = SPI_FIFO_DEPTH; 181 + tsd->curr_xfer_words = max_word; 182 + return max_word; 183 + } 184 + 185 + static unsigned 
tegra_sflash_fill_tx_fifo_from_client_txbuf( 186 + struct tegra_sflash_data *tsd, struct spi_transfer *t) 187 + { 188 + unsigned nbytes; 189 + unsigned long status; 190 + unsigned max_n_32bit = tsd->curr_xfer_words; 191 + u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos; 192 + 193 + if (max_n_32bit > SPI_FIFO_DEPTH) 194 + max_n_32bit = SPI_FIFO_DEPTH; 195 + nbytes = max_n_32bit * tsd->bytes_per_word; 196 + 197 + status = tegra_sflash_readl(tsd, SPI_STATUS); 198 + while (!(status & SPI_TXF_FULL)) { 199 + int i; 200 + unsigned int x = 0; 201 + 202 + for (i = 0; nbytes && (i < tsd->bytes_per_word); 203 + i++, nbytes--) 204 + x |= ((*tx_buf++) << i*8); 205 + tegra_sflash_writel(tsd, x, SPI_TX_FIFO); 206 + if (!nbytes) 207 + break; 208 + 209 + status = tegra_sflash_readl(tsd, SPI_STATUS); 210 + } 211 + tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word; 212 + return max_n_32bit; 213 + } 214 + 215 + static int tegra_sflash_read_rx_fifo_to_client_rxbuf( 216 + struct tegra_sflash_data *tsd, struct spi_transfer *t) 217 + { 218 + unsigned long status; 219 + unsigned int read_words = 0; 220 + u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos; 221 + 222 + status = tegra_sflash_readl(tsd, SPI_STATUS); 223 + while (!(status & SPI_RXF_EMPTY)) { 224 + int i; 225 + unsigned long x; 226 + 227 + x = tegra_sflash_readl(tsd, SPI_RX_FIFO); 228 + for (i = 0; (i < tsd->bytes_per_word); i++) 229 + *rx_buf++ = (x >> (i*8)) & 0xFF; 230 + read_words++; 231 + status = tegra_sflash_readl(tsd, SPI_STATUS); 232 + } 233 + tsd->cur_rx_pos += read_words * tsd->bytes_per_word; 234 + return 0; 235 + } 236 + 237 + static int tegra_sflash_start_cpu_based_transfer( 238 + struct tegra_sflash_data *tsd, struct spi_transfer *t) 239 + { 240 + unsigned long val = 0; 241 + unsigned cur_words; 242 + 243 + if (tsd->cur_direction & DATA_DIR_TX) 244 + val |= SPI_IE_TXC; 245 + 246 + if (tsd->cur_direction & DATA_DIR_RX) 247 + val |= SPI_IE_RXC; 248 + 249 + tegra_sflash_writel(tsd, val, SPI_DMA_CTL); 250 + 
tsd->dma_control_reg = val; 251 + 252 + if (tsd->cur_direction & DATA_DIR_TX) 253 + cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t); 254 + else 255 + cur_words = tsd->curr_xfer_words; 256 + val |= SPI_DMA_BLK_COUNT(cur_words); 257 + tegra_sflash_writel(tsd, val, SPI_DMA_CTL); 258 + tsd->dma_control_reg = val; 259 + val |= SPI_DMA_EN; 260 + tegra_sflash_writel(tsd, val, SPI_DMA_CTL); 261 + return 0; 262 + } 263 + 264 + static int tegra_sflash_start_transfer_one(struct spi_device *spi, 265 + struct spi_transfer *t, bool is_first_of_msg, 266 + bool is_single_xfer) 267 + { 268 + struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master); 269 + u32 speed; 270 + unsigned long command; 271 + 272 + speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; 273 + if (!speed) 274 + speed = tsd->spi_max_frequency; 275 + if (speed != tsd->cur_speed) { 276 + clk_set_rate(tsd->clk, speed); 277 + tsd->cur_speed = speed; 278 + } 279 + 280 + tsd->cur_spi = spi; 281 + tsd->cur_pos = 0; 282 + tsd->cur_rx_pos = 0; 283 + tsd->cur_tx_pos = 0; 284 + tsd->curr_xfer = t; 285 + tegra_sflash_calculate_curr_xfer_param(spi, tsd, t); 286 + if (is_first_of_msg) { 287 + command = tsd->def_command_reg; 288 + command |= SPI_BIT_LENGTH(t->bits_per_word - 1); 289 + command |= SPI_CS_VAL_HIGH; 290 + 291 + command &= ~SPI_MODES; 292 + if (spi->mode & SPI_CPHA) 293 + command |= SPI_CK_SDA_FALLING; 294 + 295 + if (spi->mode & SPI_CPOL) 296 + command |= SPI_ACTIVE_SCLK_DRIVE_HIGH; 297 + else 298 + command |= SPI_ACTIVE_SCLK_DRIVE_LOW; 299 + command |= SPI_CS0_EN << spi->chip_select; 300 + } else { 301 + command = tsd->command_reg; 302 + command &= ~SPI_BIT_LENGTH(~0); 303 + command |= SPI_BIT_LENGTH(t->bits_per_word - 1); 304 + command &= ~(SPI_RX_EN | SPI_TX_EN); 305 + } 306 + 307 + tsd->cur_direction = 0; 308 + if (t->rx_buf) { 309 + command |= SPI_RX_EN; 310 + tsd->cur_direction |= DATA_DIR_RX; 311 + } 312 + if (t->tx_buf) { 313 + command |= SPI_TX_EN; 314 + tsd->cur_direction |= 
DATA_DIR_TX; 315 + } 316 + tegra_sflash_writel(tsd, command, SPI_COMMAND); 317 + tsd->command_reg = command; 318 + 319 + return tegra_sflash_start_cpu_based_transfer(tsd, t); 320 + } 321 + 322 + static int tegra_sflash_transfer_one_message(struct spi_master *master, 323 + struct spi_message *msg) 324 + { 325 + bool is_first_msg = true; 326 + int single_xfer; 327 + struct tegra_sflash_data *tsd = spi_master_get_devdata(master); 328 + struct spi_transfer *xfer; 329 + struct spi_device *spi = msg->spi; 330 + int ret; 331 + 332 + ret = pm_runtime_get_sync(tsd->dev); 333 + if (ret < 0) { 334 + dev_err(tsd->dev, "pm_runtime_get() failed, err = %d\n", ret); 335 + return ret; 336 + } 337 + 338 + msg->status = 0; 339 + msg->actual_length = 0; 340 + single_xfer = list_is_singular(&msg->transfers); 341 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 342 + INIT_COMPLETION(tsd->xfer_completion); 343 + ret = tegra_sflash_start_transfer_one(spi, xfer, 344 + is_first_msg, single_xfer); 345 + if (ret < 0) { 346 + dev_err(tsd->dev, 347 + "spi can not start transfer, err %d\n", ret); 348 + goto exit; 349 + } 350 + is_first_msg = false; 351 + ret = wait_for_completion_timeout(&tsd->xfer_completion, 352 + SPI_DMA_TIMEOUT); 353 + if (WARN_ON(ret == 0)) { 354 + dev_err(tsd->dev, 355 + "spi trasfer timeout, err %d\n", ret); 356 + ret = -EIO; 357 + goto exit; 358 + } 359 + 360 + if (tsd->tx_status || tsd->rx_status) { 361 + dev_err(tsd->dev, "Error in Transfer\n"); 362 + ret = -EIO; 363 + goto exit; 364 + } 365 + msg->actual_length += xfer->len; 366 + if (xfer->cs_change && xfer->delay_usecs) { 367 + tegra_sflash_writel(tsd, tsd->def_command_reg, 368 + SPI_COMMAND); 369 + udelay(xfer->delay_usecs); 370 + } 371 + } 372 + ret = 0; 373 + exit: 374 + tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); 375 + msg->status = ret; 376 + spi_finalize_current_message(master); 377 + pm_runtime_put(tsd->dev); 378 + return ret; 379 + } 380 + 381 + static irqreturn_t 
handle_cpu_based_xfer(struct tegra_sflash_data *tsd) 382 + { 383 + struct spi_transfer *t = tsd->curr_xfer; 384 + unsigned long flags; 385 + 386 + spin_lock_irqsave(&tsd->lock, flags); 387 + if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) { 388 + dev_err(tsd->dev, 389 + "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg); 390 + dev_err(tsd->dev, 391 + "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg, 392 + tsd->dma_control_reg); 393 + tegra_periph_reset_assert(tsd->clk); 394 + udelay(2); 395 + tegra_periph_reset_deassert(tsd->clk); 396 + complete(&tsd->xfer_completion); 397 + goto exit; 398 + } 399 + 400 + if (tsd->cur_direction & DATA_DIR_RX) 401 + tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t); 402 + 403 + if (tsd->cur_direction & DATA_DIR_TX) 404 + tsd->cur_pos = tsd->cur_tx_pos; 405 + else 406 + tsd->cur_pos = tsd->cur_rx_pos; 407 + 408 + if (tsd->cur_pos == t->len) { 409 + complete(&tsd->xfer_completion); 410 + goto exit; 411 + } 412 + 413 + tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t); 414 + tegra_sflash_start_cpu_based_transfer(tsd, t); 415 + exit: 416 + spin_unlock_irqrestore(&tsd->lock, flags); 417 + return IRQ_HANDLED; 418 + } 419 + 420 + static irqreturn_t tegra_sflash_isr(int irq, void *context_data) 421 + { 422 + struct tegra_sflash_data *tsd = context_data; 423 + 424 + tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS); 425 + if (tsd->cur_direction & DATA_DIR_TX) 426 + tsd->tx_status = tsd->status_reg & SPI_TX_OVF; 427 + 428 + if (tsd->cur_direction & DATA_DIR_RX) 429 + tsd->rx_status = tsd->status_reg & SPI_RX_UNF; 430 + tegra_sflash_clear_status(tsd); 431 + 432 + return handle_cpu_based_xfer(tsd); 433 + } 434 + 435 + static struct tegra_spi_platform_data *tegra_sflash_parse_dt( 436 + struct platform_device *pdev) 437 + { 438 + struct tegra_spi_platform_data *pdata; 439 + struct device_node *np = pdev->dev.of_node; 440 + u32 max_freq; 441 + 442 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 443 + if 
(!pdata) {
		dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
		return NULL;
	}

	/* Optional DT property; pdata->spi_max_frequency stays 0 if absent
	 * and probe() substitutes a 25 MHz default. */
	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
		pdata->spi_max_frequency = max_freq;

	return pdata;
}

static struct of_device_id tegra_sflash_of_match[] __devinitconst = {
	{ .compatible = "nvidia,tegra20-sflash", },
	{}
};
MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);

/*
 * Probe the Tegra20 SFLASH controller: obtain platform data (board file or
 * device tree), map registers, hook the interrupt, acquire the clock, set up
 * runtime PM, reset the controller and register the SPI master.
 *
 * Error unwinding relies on the labels at the bottom falling through:
 * exit_pm_disable -> exit_free_irq -> exit_free_master.
 */
static int __devinit tegra_sflash_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct tegra_sflash_data *tsd;
	struct resource *r;
	struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
	int ret;
	const struct of_device_id *match;

	match = of_match_device(of_match_ptr(tegra_sflash_of_match),
					&pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	/* Board-file platform data wins; fall back to device tree. */
	if (!pdata && pdev->dev.of_node)
		pdata = tegra_sflash_parse_dt(pdev);

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data, exiting\n");
		return -ENODEV;
	}

	if (!pdata->spi_max_frequency)
		pdata->spi_max_frequency = 25000000; /* 25MHz */

	master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
	if (!master) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->transfer_one_message = tegra_sflash_transfer_one_message;
	master->num_chipselect = MAX_CHIP_SELECT;
	master->bus_num = -1;	/* dynamically assigned bus number */

	dev_set_drvdata(&pdev->dev, master);
	tsd = spi_master_get_devdata(master);
	tsd->master = master;
	tsd->dev = &pdev->dev;
	spin_lock_init(&tsd->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		ret = -ENODEV;
		goto exit_free_master;
	}
	tsd->base = devm_request_and_ioremap(&pdev->dev, r);
	if (!tsd->base) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		ret = -EADDRNOTAVAIL;
		goto exit_free_master;
	}

	tsd->irq = platform_get_irq(pdev, 0);
	ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
			dev_name(&pdev->dev), tsd);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
					tsd->irq);
		goto exit_free_master;
	}

	tsd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(tsd->clk)) {
		dev_err(&pdev->dev, "can not get clock\n");
		ret = PTR_ERR(tsd->clk);
		goto exit_free_irq;
	}

	tsd->spi_max_frequency = pdata->spi_max_frequency;
	init_completion(&tsd->xfer_completion);
	pm_runtime_enable(&pdev->dev);
	/* If runtime PM is not enabled (e.g. !CONFIG_PM_RUNTIME), bring the
	 * clock up manually so the controller is usable. */
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_sflash_runtime_resume(&pdev->dev);
		if (ret)
			goto exit_pm_disable;
	}

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
		goto exit_pm_disable;
	}

	/* Reset controller */
	tegra_periph_reset_assert(tsd->clk);
	udelay(2);
	tegra_periph_reset_deassert(tsd->clk);

	/* Master mode with software-controlled chip select as the idle
	 * default command-register value. */
	tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
	tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
	pm_runtime_put(&pdev->dev);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
		goto exit_pm_disable;
	}
	return ret;

exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_sflash_runtime_suspend(&pdev->dev);
exit_free_irq:
	free_irq(tsd->irq, tsd);
exit_free_master:
	spi_master_put(master);
	return ret;
}

static int __devexit tegra_sflash_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct tegra_sflash_data *tsd = spi_master_get_devdata(master);

	free_irq(tsd->irq, tsd);
	spi_unregister_master(master);

	pm_runtime_disable(&pdev->dev);
	/* Gate the clock ourselves if runtime PM did not already do so. */
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_sflash_runtime_suspend(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_sflash_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int tegra_sflash_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}
	/* Restore the last programmed command register lost across suspend. */
	tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif

static int tegra_sflash_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_sflash_data *tsd = spi_master_get_devdata(master);

	/* Flush all write which are in PPSB queue by reading back */
	tegra_sflash_readl(tsd, SPI_COMMAND);

	clk_disable_unprepare(tsd->clk);
	return 0;
}

static int tegra_sflash_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tsd->clk);
	if (ret < 0) {
		dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/* NOTE(review): "slink_pm_ops" name looks copy-pasted from the SLINK
 * driver; harmless since it is static, but confusing. */
static const struct dev_pm_ops slink_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
		tegra_sflash_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
};
static struct platform_driver tegra_sflash_driver = {
	.driver = {
		.name		= "spi-tegra-sflash",
		.owner		= THIS_MODULE,
		.pm		= &slink_pm_ops,
		.of_match_table	= of_match_ptr(tegra_sflash_of_match),
	},
	.probe =	tegra_sflash_probe,
	.remove =	__devexit_p(tegra_sflash_remove),
};
module_platform_driver(tegra_sflash_driver);

MODULE_ALIAS("platform:spi-tegra-sflash");
MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
+1358
drivers/spi/spi-tegra20-slink.c
··· 1 + /* 2 + * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller. 3 + * 4 + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/clk.h> 20 + #include <linux/completion.h> 21 + #include <linux/delay.h> 22 + #include <linux/dmaengine.h> 23 + #include <linux/dma-mapping.h> 24 + #include <linux/dmapool.h> 25 + #include <linux/err.h> 26 + #include <linux/init.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/io.h> 29 + #include <linux/kernel.h> 30 + #include <linux/kthread.h> 31 + #include <linux/module.h> 32 + #include <linux/platform_device.h> 33 + #include <linux/pm_runtime.h> 34 + #include <linux/of.h> 35 + #include <linux/of_device.h> 36 + #include <linux/spi/spi.h> 37 + #include <linux/spi/spi-tegra.h> 38 + #include <mach/clk.h> 39 + 40 + #define SLINK_COMMAND 0x000 41 + #define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) 42 + #define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) 43 + #define SLINK_BOTH_EN (1 << 10) 44 + #define SLINK_CS_SW (1 << 11) 45 + #define SLINK_CS_VALUE (1 << 12) 46 + #define SLINK_CS_POLARITY (1 << 13) 47 + #define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) 48 + #define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) 49 + #define SLINK_IDLE_SDA_PULL_LOW (2 << 16) 50 + #define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) 51 + #define SLINK_IDLE_SDA_MASK (3 << 16) 52 + #define SLINK_CS_POLARITY1 
(1 << 20) 53 + #define SLINK_CK_SDA (1 << 21) 54 + #define SLINK_CS_POLARITY2 (1 << 22) 55 + #define SLINK_CS_POLARITY3 (1 << 23) 56 + #define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) 57 + #define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) 58 + #define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) 59 + #define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) 60 + #define SLINK_IDLE_SCLK_MASK (3 << 24) 61 + #define SLINK_M_S (1 << 28) 62 + #define SLINK_WAIT (1 << 29) 63 + #define SLINK_GO (1 << 30) 64 + #define SLINK_ENB (1 << 31) 65 + 66 + #define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA) 67 + 68 + #define SLINK_COMMAND2 0x004 69 + #define SLINK_LSBFE (1 << 0) 70 + #define SLINK_SSOE (1 << 1) 71 + #define SLINK_SPIE (1 << 4) 72 + #define SLINK_BIDIROE (1 << 6) 73 + #define SLINK_MODFEN (1 << 7) 74 + #define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) 75 + #define SLINK_CS_ACTIVE_BETWEEN (1 << 17) 76 + #define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) 77 + #define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) 78 + #define SLINK_FIFO_REFILLS_0 (0 << 22) 79 + #define SLINK_FIFO_REFILLS_1 (1 << 22) 80 + #define SLINK_FIFO_REFILLS_2 (2 << 22) 81 + #define SLINK_FIFO_REFILLS_3 (3 << 22) 82 + #define SLINK_FIFO_REFILLS_MASK (3 << 22) 83 + #define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) 84 + #define SLINK_SPC0 (1 << 29) 85 + #define SLINK_TXEN (1 << 30) 86 + #define SLINK_RXEN (1 << 31) 87 + 88 + #define SLINK_STATUS 0x008 89 + #define SLINK_COUNT(val) (((val) >> 0) & 0x1f) 90 + #define SLINK_WORD(val) (((val) >> 5) & 0x1f) 91 + #define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) 92 + #define SLINK_MODF (1 << 16) 93 + #define SLINK_RX_UNF (1 << 18) 94 + #define SLINK_TX_OVF (1 << 19) 95 + #define SLINK_TX_FULL (1 << 20) 96 + #define SLINK_TX_EMPTY (1 << 21) 97 + #define SLINK_RX_FULL (1 << 22) 98 + #define SLINK_RX_EMPTY (1 << 23) 99 + #define SLINK_TX_UNF (1 << 24) 100 + #define SLINK_RX_OVF (1 << 25) 101 + #define SLINK_TX_FLUSH (1 << 26) 102 + #define SLINK_RX_FLUSH (1 << 27) 103 + #define SLINK_SCLK (1 << 28) 
104 + #define SLINK_ERR (1 << 29) 105 + #define SLINK_RDY (1 << 30) 106 + #define SLINK_BSY (1 << 31) 107 + #define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \ 108 + SLINK_TX_UNF | SLINK_RX_OVF) 109 + 110 + #define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY) 111 + 112 + #define SLINK_MAS_DATA 0x010 113 + #define SLINK_SLAVE_DATA 0x014 114 + 115 + #define SLINK_DMA_CTL 0x018 116 + #define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) 117 + #define SLINK_TX_TRIG_1 (0 << 16) 118 + #define SLINK_TX_TRIG_4 (1 << 16) 119 + #define SLINK_TX_TRIG_8 (2 << 16) 120 + #define SLINK_TX_TRIG_16 (3 << 16) 121 + #define SLINK_TX_TRIG_MASK (3 << 16) 122 + #define SLINK_RX_TRIG_1 (0 << 18) 123 + #define SLINK_RX_TRIG_4 (1 << 18) 124 + #define SLINK_RX_TRIG_8 (2 << 18) 125 + #define SLINK_RX_TRIG_16 (3 << 18) 126 + #define SLINK_RX_TRIG_MASK (3 << 18) 127 + #define SLINK_PACKED (1 << 20) 128 + #define SLINK_PACK_SIZE_4 (0 << 21) 129 + #define SLINK_PACK_SIZE_8 (1 << 21) 130 + #define SLINK_PACK_SIZE_16 (2 << 21) 131 + #define SLINK_PACK_SIZE_32 (3 << 21) 132 + #define SLINK_PACK_SIZE_MASK (3 << 21) 133 + #define SLINK_IE_TXC (1 << 26) 134 + #define SLINK_IE_RXC (1 << 27) 135 + #define SLINK_DMA_EN (1 << 31) 136 + 137 + #define SLINK_STATUS2 0x01c 138 + #define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) 139 + #define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16) 140 + #define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6) 141 + 142 + #define SLINK_TX_FIFO 0x100 143 + #define SLINK_RX_FIFO 0x180 144 + 145 + #define DATA_DIR_TX (1 << 0) 146 + #define DATA_DIR_RX (1 << 1) 147 + 148 + #define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000)) 149 + 150 + #define DEFAULT_SPI_DMA_BUF_LEN (16*1024) 151 + #define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20) 152 + #define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0) 153 + 154 + #define SLINK_STATUS2_RESET \ 155 + (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16) 156 + 157 + #define 
MAX_CHIP_SELECT 4 158 + #define SLINK_FIFO_DEPTH 32 159 + 160 + struct tegra_slink_chip_data { 161 + bool cs_hold_time; 162 + }; 163 + 164 + struct tegra_slink_data { 165 + struct device *dev; 166 + struct spi_master *master; 167 + const struct tegra_slink_chip_data *chip_data; 168 + spinlock_t lock; 169 + 170 + struct clk *clk; 171 + void __iomem *base; 172 + phys_addr_t phys; 173 + unsigned irq; 174 + int dma_req_sel; 175 + u32 spi_max_frequency; 176 + u32 cur_speed; 177 + 178 + struct spi_device *cur_spi; 179 + unsigned cur_pos; 180 + unsigned cur_len; 181 + unsigned words_per_32bit; 182 + unsigned bytes_per_word; 183 + unsigned curr_dma_words; 184 + unsigned cur_direction; 185 + 186 + unsigned cur_rx_pos; 187 + unsigned cur_tx_pos; 188 + 189 + unsigned dma_buf_size; 190 + unsigned max_buf_size; 191 + bool is_curr_dma_xfer; 192 + bool is_hw_based_cs; 193 + 194 + struct completion rx_dma_complete; 195 + struct completion tx_dma_complete; 196 + 197 + u32 tx_status; 198 + u32 rx_status; 199 + u32 status_reg; 200 + bool is_packed; 201 + unsigned long packed_size; 202 + 203 + u32 command_reg; 204 + u32 command2_reg; 205 + u32 dma_control_reg; 206 + u32 def_command_reg; 207 + u32 def_command2_reg; 208 + 209 + struct completion xfer_completion; 210 + struct spi_transfer *curr_xfer; 211 + struct dma_chan *rx_dma_chan; 212 + u32 *rx_dma_buf; 213 + dma_addr_t rx_dma_phys; 214 + struct dma_async_tx_descriptor *rx_dma_desc; 215 + 216 + struct dma_chan *tx_dma_chan; 217 + u32 *tx_dma_buf; 218 + dma_addr_t tx_dma_phys; 219 + struct dma_async_tx_descriptor *tx_dma_desc; 220 + }; 221 + 222 + static int tegra_slink_runtime_suspend(struct device *dev); 223 + static int tegra_slink_runtime_resume(struct device *dev); 224 + 225 + static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi, 226 + unsigned long reg) 227 + { 228 + return readl(tspi->base + reg); 229 + } 230 + 231 + static inline void tegra_slink_writel(struct tegra_slink_data *tspi, 232 + unsigned long 
val, unsigned long reg) 233 + { 234 + writel(val, tspi->base + reg); 235 + 236 + /* Read back register to make sure that register writes completed */ 237 + if (reg != SLINK_TX_FIFO) 238 + readl(tspi->base + SLINK_MAS_DATA); 239 + } 240 + 241 + static void tegra_slink_clear_status(struct tegra_slink_data *tspi) 242 + { 243 + unsigned long val; 244 + unsigned long val_write = 0; 245 + 246 + val = tegra_slink_readl(tspi, SLINK_STATUS); 247 + 248 + /* Write 1 to clear status register */ 249 + val_write = SLINK_RDY | SLINK_FIFO_ERROR; 250 + tegra_slink_writel(tspi, val_write, SLINK_STATUS); 251 + } 252 + 253 + static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi, 254 + struct spi_transfer *t) 255 + { 256 + unsigned long val; 257 + 258 + switch (tspi->bytes_per_word) { 259 + case 0: 260 + val = SLINK_PACK_SIZE_4; 261 + break; 262 + case 1: 263 + val = SLINK_PACK_SIZE_8; 264 + break; 265 + case 2: 266 + val = SLINK_PACK_SIZE_16; 267 + break; 268 + case 4: 269 + val = SLINK_PACK_SIZE_32; 270 + break; 271 + default: 272 + val = 0; 273 + } 274 + return val; 275 + } 276 + 277 + static unsigned tegra_slink_calculate_curr_xfer_param( 278 + struct spi_device *spi, struct tegra_slink_data *tspi, 279 + struct spi_transfer *t) 280 + { 281 + unsigned remain_len = t->len - tspi->cur_pos; 282 + unsigned max_word; 283 + unsigned bits_per_word ; 284 + unsigned max_len; 285 + unsigned total_fifo_words; 286 + 287 + bits_per_word = t->bits_per_word ? 
t->bits_per_word : 288 + spi->bits_per_word; 289 + tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1; 290 + 291 + if (bits_per_word == 8 || bits_per_word == 16) { 292 + tspi->is_packed = 1; 293 + tspi->words_per_32bit = 32/bits_per_word; 294 + } else { 295 + tspi->is_packed = 0; 296 + tspi->words_per_32bit = 1; 297 + } 298 + tspi->packed_size = tegra_slink_get_packed_size(tspi, t); 299 + 300 + if (tspi->is_packed) { 301 + max_len = min(remain_len, tspi->max_buf_size); 302 + tspi->curr_dma_words = max_len/tspi->bytes_per_word; 303 + total_fifo_words = max_len/4; 304 + } else { 305 + max_word = (remain_len - 1) / tspi->bytes_per_word + 1; 306 + max_word = min(max_word, tspi->max_buf_size/4); 307 + tspi->curr_dma_words = max_word; 308 + total_fifo_words = max_word; 309 + } 310 + return total_fifo_words; 311 + } 312 + 313 + static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf( 314 + struct tegra_slink_data *tspi, struct spi_transfer *t) 315 + { 316 + unsigned nbytes; 317 + unsigned tx_empty_count; 318 + unsigned long fifo_status; 319 + unsigned max_n_32bit; 320 + unsigned i, count; 321 + unsigned long x; 322 + unsigned int written_words; 323 + unsigned fifo_words_left; 324 + u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos; 325 + 326 + fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2); 327 + tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status); 328 + 329 + if (tspi->is_packed) { 330 + fifo_words_left = tx_empty_count * tspi->words_per_32bit; 331 + written_words = min(fifo_words_left, tspi->curr_dma_words); 332 + nbytes = written_words * tspi->bytes_per_word; 333 + max_n_32bit = DIV_ROUND_UP(nbytes, 4); 334 + for (count = 0; count < max_n_32bit; count++) { 335 + x = 0; 336 + for (i = 0; (i < 4) && nbytes; i++, nbytes--) 337 + x |= (*tx_buf++) << (i*8); 338 + tegra_slink_writel(tspi, x, SLINK_TX_FIFO); 339 + } 340 + } else { 341 + max_n_32bit = min(tspi->curr_dma_words, tx_empty_count); 342 + written_words = max_n_32bit; 343 + nbytes = written_words * 
tspi->bytes_per_word; 344 + for (count = 0; count < max_n_32bit; count++) { 345 + x = 0; 346 + for (i = 0; nbytes && (i < tspi->bytes_per_word); 347 + i++, nbytes--) 348 + x |= ((*tx_buf++) << i*8); 349 + tegra_slink_writel(tspi, x, SLINK_TX_FIFO); 350 + } 351 + } 352 + tspi->cur_tx_pos += written_words * tspi->bytes_per_word; 353 + return written_words; 354 + } 355 + 356 + static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf( 357 + struct tegra_slink_data *tspi, struct spi_transfer *t) 358 + { 359 + unsigned rx_full_count; 360 + unsigned long fifo_status; 361 + unsigned i, count; 362 + unsigned long x; 363 + unsigned int read_words = 0; 364 + unsigned len; 365 + u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos; 366 + 367 + fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2); 368 + rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status); 369 + if (tspi->is_packed) { 370 + len = tspi->curr_dma_words * tspi->bytes_per_word; 371 + for (count = 0; count < rx_full_count; count++) { 372 + x = tegra_slink_readl(tspi, SLINK_RX_FIFO); 373 + for (i = 0; len && (i < 4); i++, len--) 374 + *rx_buf++ = (x >> i*8) & 0xFF; 375 + } 376 + tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; 377 + read_words += tspi->curr_dma_words; 378 + } else { 379 + unsigned int bits_per_word; 380 + 381 + bits_per_word = t->bits_per_word ? 
t->bits_per_word : 382 + tspi->cur_spi->bits_per_word; 383 + for (count = 0; count < rx_full_count; count++) { 384 + x = tegra_slink_readl(tspi, SLINK_RX_FIFO); 385 + for (i = 0; (i < tspi->bytes_per_word); i++) 386 + *rx_buf++ = (x >> (i*8)) & 0xFF; 387 + } 388 + tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word; 389 + read_words += rx_full_count; 390 + } 391 + return read_words; 392 + } 393 + 394 + static void tegra_slink_copy_client_txbuf_to_spi_txbuf( 395 + struct tegra_slink_data *tspi, struct spi_transfer *t) 396 + { 397 + unsigned len; 398 + 399 + /* Make the dma buffer to read by cpu */ 400 + dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys, 401 + tspi->dma_buf_size, DMA_TO_DEVICE); 402 + 403 + if (tspi->is_packed) { 404 + len = tspi->curr_dma_words * tspi->bytes_per_word; 405 + memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len); 406 + } else { 407 + unsigned int i; 408 + unsigned int count; 409 + u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos; 410 + unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word; 411 + unsigned int x; 412 + 413 + for (count = 0; count < tspi->curr_dma_words; count++) { 414 + x = 0; 415 + for (i = 0; consume && (i < tspi->bytes_per_word); 416 + i++, consume--) 417 + x |= ((*tx_buf++) << i * 8); 418 + tspi->tx_dma_buf[count] = x; 419 + } 420 + } 421 + tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word; 422 + 423 + /* Make the dma buffer to read by dma */ 424 + dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys, 425 + tspi->dma_buf_size, DMA_TO_DEVICE); 426 + } 427 + 428 + static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf( 429 + struct tegra_slink_data *tspi, struct spi_transfer *t) 430 + { 431 + unsigned len; 432 + 433 + /* Make the dma buffer to read by cpu */ 434 + dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys, 435 + tspi->dma_buf_size, DMA_FROM_DEVICE); 436 + 437 + if (tspi->is_packed) { 438 + len = tspi->curr_dma_words * tspi->bytes_per_word; 439 + memcpy(t->rx_buf + 
tspi->cur_rx_pos, tspi->rx_dma_buf, len); 440 + } else { 441 + unsigned int i; 442 + unsigned int count; 443 + unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos; 444 + unsigned int x; 445 + unsigned int rx_mask, bits_per_word; 446 + 447 + bits_per_word = t->bits_per_word ? t->bits_per_word : 448 + tspi->cur_spi->bits_per_word; 449 + rx_mask = (1 << bits_per_word) - 1; 450 + for (count = 0; count < tspi->curr_dma_words; count++) { 451 + x = tspi->rx_dma_buf[count]; 452 + x &= rx_mask; 453 + for (i = 0; (i < tspi->bytes_per_word); i++) 454 + *rx_buf++ = (x >> (i*8)) & 0xFF; 455 + } 456 + } 457 + tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; 458 + 459 + /* Make the dma buffer to read by dma */ 460 + dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys, 461 + tspi->dma_buf_size, DMA_FROM_DEVICE); 462 + } 463 + 464 + static void tegra_slink_dma_complete(void *args) 465 + { 466 + struct completion *dma_complete = args; 467 + 468 + complete(dma_complete); 469 + } 470 + 471 + static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len) 472 + { 473 + INIT_COMPLETION(tspi->tx_dma_complete); 474 + tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan, 475 + tspi->tx_dma_phys, len, DMA_MEM_TO_DEV, 476 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 477 + if (!tspi->tx_dma_desc) { 478 + dev_err(tspi->dev, "Not able to get desc for Tx\n"); 479 + return -EIO; 480 + } 481 + 482 + tspi->tx_dma_desc->callback = tegra_slink_dma_complete; 483 + tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete; 484 + 485 + dmaengine_submit(tspi->tx_dma_desc); 486 + dma_async_issue_pending(tspi->tx_dma_chan); 487 + return 0; 488 + } 489 + 490 + static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len) 491 + { 492 + INIT_COMPLETION(tspi->rx_dma_complete); 493 + tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan, 494 + tspi->rx_dma_phys, len, DMA_DEV_TO_MEM, 495 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 496 + if 
(!tspi->rx_dma_desc) { 497 + dev_err(tspi->dev, "Not able to get desc for Rx\n"); 498 + return -EIO; 499 + } 500 + 501 + tspi->rx_dma_desc->callback = tegra_slink_dma_complete; 502 + tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete; 503 + 504 + dmaengine_submit(tspi->rx_dma_desc); 505 + dma_async_issue_pending(tspi->rx_dma_chan); 506 + return 0; 507 + } 508 + 509 + static int tegra_slink_start_dma_based_transfer( 510 + struct tegra_slink_data *tspi, struct spi_transfer *t) 511 + { 512 + unsigned long val; 513 + unsigned long test_val; 514 + unsigned int len; 515 + int ret = 0; 516 + unsigned long status; 517 + 518 + /* Make sure that Rx and Tx fifo are empty */ 519 + status = tegra_slink_readl(tspi, SLINK_STATUS); 520 + if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) { 521 + dev_err(tspi->dev, 522 + "Rx/Tx fifo are not empty status 0x%08lx\n", status); 523 + return -EIO; 524 + } 525 + 526 + val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1); 527 + val |= tspi->packed_size; 528 + if (tspi->is_packed) 529 + len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word, 530 + 4) * 4; 531 + else 532 + len = tspi->curr_dma_words * 4; 533 + 534 + /* Set attention level based on length of transfer */ 535 + if (len & 0xF) 536 + val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1; 537 + else if (((len) >> 4) & 0x1) 538 + val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4; 539 + else 540 + val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8; 541 + 542 + if (tspi->cur_direction & DATA_DIR_TX) 543 + val |= SLINK_IE_TXC; 544 + 545 + if (tspi->cur_direction & DATA_DIR_RX) 546 + val |= SLINK_IE_RXC; 547 + 548 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 549 + tspi->dma_control_reg = val; 550 + 551 + if (tspi->cur_direction & DATA_DIR_TX) { 552 + tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t); 553 + wmb(); 554 + ret = tegra_slink_start_tx_dma(tspi, len); 555 + if (ret < 0) { 556 + dev_err(tspi->dev, 557 + "Starting tx dma failed, err %d\n", ret); 558 + return ret; 559 + } 560 + 561 + /* 
Wait for tx fifo to be fill before starting slink */ 562 + test_val = tegra_slink_readl(tspi, SLINK_STATUS); 563 + while (!(test_val & SLINK_TX_FULL)) 564 + test_val = tegra_slink_readl(tspi, SLINK_STATUS); 565 + } 566 + 567 + if (tspi->cur_direction & DATA_DIR_RX) { 568 + /* Make the dma buffer to read by dma */ 569 + dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys, 570 + tspi->dma_buf_size, DMA_FROM_DEVICE); 571 + 572 + ret = tegra_slink_start_rx_dma(tspi, len); 573 + if (ret < 0) { 574 + dev_err(tspi->dev, 575 + "Starting rx dma failed, err %d\n", ret); 576 + if (tspi->cur_direction & DATA_DIR_TX) 577 + dmaengine_terminate_all(tspi->tx_dma_chan); 578 + return ret; 579 + } 580 + } 581 + tspi->is_curr_dma_xfer = true; 582 + if (tspi->is_packed) { 583 + val |= SLINK_PACKED; 584 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 585 + /* HW need small delay after settign Packed mode */ 586 + udelay(1); 587 + } 588 + tspi->dma_control_reg = val; 589 + 590 + val |= SLINK_DMA_EN; 591 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 592 + return ret; 593 + } 594 + 595 + static int tegra_slink_start_cpu_based_transfer( 596 + struct tegra_slink_data *tspi, struct spi_transfer *t) 597 + { 598 + unsigned long val; 599 + unsigned cur_words; 600 + 601 + val = tspi->packed_size; 602 + if (tspi->cur_direction & DATA_DIR_TX) 603 + val |= SLINK_IE_TXC; 604 + 605 + if (tspi->cur_direction & DATA_DIR_RX) 606 + val |= SLINK_IE_RXC; 607 + 608 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 609 + tspi->dma_control_reg = val; 610 + 611 + if (tspi->cur_direction & DATA_DIR_TX) 612 + cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t); 613 + else 614 + cur_words = tspi->curr_dma_words; 615 + val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1); 616 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 617 + tspi->dma_control_reg = val; 618 + 619 + tspi->is_curr_dma_xfer = false; 620 + if (tspi->is_packed) { 621 + val |= SLINK_PACKED; 622 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 
623 + udelay(1); 624 + wmb(); 625 + } 626 + tspi->dma_control_reg = val; 627 + val |= SLINK_DMA_EN; 628 + tegra_slink_writel(tspi, val, SLINK_DMA_CTL); 629 + return 0; 630 + } 631 + 632 + static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi, 633 + bool dma_to_memory) 634 + { 635 + struct dma_chan *dma_chan; 636 + u32 *dma_buf; 637 + dma_addr_t dma_phys; 638 + int ret; 639 + struct dma_slave_config dma_sconfig; 640 + dma_cap_mask_t mask; 641 + 642 + dma_cap_zero(mask); 643 + dma_cap_set(DMA_SLAVE, mask); 644 + dma_chan = dma_request_channel(mask, NULL, NULL); 645 + if (!dma_chan) { 646 + dev_err(tspi->dev, 647 + "Dma channel is not available, will try later\n"); 648 + return -EPROBE_DEFER; 649 + } 650 + 651 + dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size, 652 + &dma_phys, GFP_KERNEL); 653 + if (!dma_buf) { 654 + dev_err(tspi->dev, " Not able to allocate the dma buffer\n"); 655 + dma_release_channel(dma_chan); 656 + return -ENOMEM; 657 + } 658 + 659 + dma_sconfig.slave_id = tspi->dma_req_sel; 660 + if (dma_to_memory) { 661 + dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO; 662 + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 663 + dma_sconfig.src_maxburst = 0; 664 + } else { 665 + dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO; 666 + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 667 + dma_sconfig.dst_maxburst = 0; 668 + } 669 + 670 + ret = dmaengine_slave_config(dma_chan, &dma_sconfig); 671 + if (ret) 672 + goto scrub; 673 + if (dma_to_memory) { 674 + tspi->rx_dma_chan = dma_chan; 675 + tspi->rx_dma_buf = dma_buf; 676 + tspi->rx_dma_phys = dma_phys; 677 + } else { 678 + tspi->tx_dma_chan = dma_chan; 679 + tspi->tx_dma_buf = dma_buf; 680 + tspi->tx_dma_phys = dma_phys; 681 + } 682 + return 0; 683 + 684 + scrub: 685 + dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys); 686 + dma_release_channel(dma_chan); 687 + return ret; 688 + } 689 + 690 + static void tegra_slink_deinit_dma_param(struct 
tegra_slink_data *tspi, 691 + bool dma_to_memory) 692 + { 693 + u32 *dma_buf; 694 + dma_addr_t dma_phys; 695 + struct dma_chan *dma_chan; 696 + 697 + if (dma_to_memory) { 698 + dma_buf = tspi->rx_dma_buf; 699 + dma_chan = tspi->rx_dma_chan; 700 + dma_phys = tspi->rx_dma_phys; 701 + tspi->rx_dma_chan = NULL; 702 + tspi->rx_dma_buf = NULL; 703 + } else { 704 + dma_buf = tspi->tx_dma_buf; 705 + dma_chan = tspi->tx_dma_chan; 706 + dma_phys = tspi->tx_dma_phys; 707 + tspi->tx_dma_buf = NULL; 708 + tspi->tx_dma_chan = NULL; 709 + } 710 + if (!dma_chan) 711 + return; 712 + 713 + dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys); 714 + dma_release_channel(dma_chan); 715 + } 716 + 717 + static int tegra_slink_start_transfer_one(struct spi_device *spi, 718 + struct spi_transfer *t, bool is_first_of_msg, 719 + bool is_single_xfer) 720 + { 721 + struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master); 722 + u32 speed; 723 + u8 bits_per_word; 724 + unsigned total_fifo_words; 725 + int ret; 726 + struct tegra_spi_device_controller_data *cdata = spi->controller_data; 727 + unsigned long command; 728 + unsigned long command2; 729 + 730 + bits_per_word = t->bits_per_word; 731 + speed = t->speed_hz ? 
t->speed_hz : spi->max_speed_hz; 732 + if (!speed) 733 + speed = tspi->spi_max_frequency; 734 + if (speed != tspi->cur_speed) { 735 + clk_set_rate(tspi->clk, speed * 4); 736 + tspi->cur_speed = speed; 737 + } 738 + 739 + tspi->cur_spi = spi; 740 + tspi->cur_pos = 0; 741 + tspi->cur_rx_pos = 0; 742 + tspi->cur_tx_pos = 0; 743 + tspi->curr_xfer = t; 744 + total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t); 745 + 746 + if (is_first_of_msg) { 747 + tegra_slink_clear_status(tspi); 748 + 749 + command = tspi->def_command_reg; 750 + command |= SLINK_BIT_LENGTH(bits_per_word - 1); 751 + 752 + command2 = tspi->def_command2_reg; 753 + command2 |= SLINK_SS_EN_CS(spi->chip_select); 754 + 755 + /* possibly use the hw based chip select */ 756 + tspi->is_hw_based_cs = false; 757 + if (cdata && cdata->is_hw_based_cs && is_single_xfer && 758 + ((tspi->curr_dma_words * tspi->bytes_per_word) == 759 + (t->len - tspi->cur_pos))) { 760 + int setup_count; 761 + int sts2; 762 + 763 + setup_count = cdata->cs_setup_clk_count >> 1; 764 + setup_count = max(setup_count, 3); 765 + command2 |= SLINK_SS_SETUP(setup_count); 766 + if (tspi->chip_data->cs_hold_time) { 767 + int hold_count; 768 + 769 + hold_count = cdata->cs_hold_clk_count; 770 + hold_count = max(hold_count, 0xF); 771 + sts2 = tegra_slink_readl(tspi, SLINK_STATUS2); 772 + sts2 &= ~SLINK_SS_HOLD_TIME(0xF); 773 + sts2 |= SLINK_SS_HOLD_TIME(hold_count); 774 + tegra_slink_writel(tspi, sts2, SLINK_STATUS2); 775 + } 776 + tspi->is_hw_based_cs = true; 777 + } 778 + 779 + if (tspi->is_hw_based_cs) 780 + command &= ~SLINK_CS_SW; 781 + else 782 + command |= SLINK_CS_SW | SLINK_CS_VALUE; 783 + 784 + command &= ~SLINK_MODES; 785 + if (spi->mode & SPI_CPHA) 786 + command |= SLINK_CK_SDA; 787 + 788 + if (spi->mode & SPI_CPOL) 789 + command |= SLINK_IDLE_SCLK_DRIVE_HIGH; 790 + else 791 + command |= SLINK_IDLE_SCLK_DRIVE_LOW; 792 + } else { 793 + command = tspi->command_reg; 794 + command &= ~SLINK_BIT_LENGTH(~0); 795 + command 
|= SLINK_BIT_LENGTH(bits_per_word - 1); 796 + 797 + command2 = tspi->command2_reg; 798 + command2 &= ~(SLINK_RXEN | SLINK_TXEN); 799 + } 800 + 801 + tegra_slink_writel(tspi, command, SLINK_COMMAND); 802 + tspi->command_reg = command; 803 + 804 + tspi->cur_direction = 0; 805 + if (t->rx_buf) { 806 + command2 |= SLINK_RXEN; 807 + tspi->cur_direction |= DATA_DIR_RX; 808 + } 809 + if (t->tx_buf) { 810 + command2 |= SLINK_TXEN; 811 + tspi->cur_direction |= DATA_DIR_TX; 812 + } 813 + tegra_slink_writel(tspi, command2, SLINK_COMMAND2); 814 + tspi->command2_reg = command2; 815 + 816 + if (total_fifo_words > SLINK_FIFO_DEPTH) 817 + ret = tegra_slink_start_dma_based_transfer(tspi, t); 818 + else 819 + ret = tegra_slink_start_cpu_based_transfer(tspi, t); 820 + return ret; 821 + } 822 + 823 + static int tegra_slink_setup(struct spi_device *spi) 824 + { 825 + struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master); 826 + unsigned long val; 827 + unsigned long flags; 828 + int ret; 829 + unsigned int cs_pol_bit[MAX_CHIP_SELECT] = { 830 + SLINK_CS_POLARITY, 831 + SLINK_CS_POLARITY1, 832 + SLINK_CS_POLARITY2, 833 + SLINK_CS_POLARITY3, 834 + }; 835 + 836 + dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", 837 + spi->bits_per_word, 838 + spi->mode & SPI_CPOL ? "" : "~", 839 + spi->mode & SPI_CPHA ? 
"" : "~", 840 + spi->max_speed_hz); 841 + 842 + BUG_ON(spi->chip_select >= MAX_CHIP_SELECT); 843 + 844 + ret = pm_runtime_get_sync(tspi->dev); 845 + if (ret < 0) { 846 + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); 847 + return ret; 848 + } 849 + 850 + spin_lock_irqsave(&tspi->lock, flags); 851 + val = tspi->def_command_reg; 852 + if (spi->mode & SPI_CS_HIGH) 853 + val |= cs_pol_bit[spi->chip_select]; 854 + else 855 + val &= ~cs_pol_bit[spi->chip_select]; 856 + tspi->def_command_reg = val; 857 + tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 858 + spin_unlock_irqrestore(&tspi->lock, flags); 859 + 860 + pm_runtime_put(tspi->dev); 861 + return 0; 862 + } 863 + 864 + static int tegra_slink_prepare_transfer(struct spi_master *master) 865 + { 866 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 867 + 868 + return pm_runtime_get_sync(tspi->dev); 869 + } 870 + 871 + static int tegra_slink_unprepare_transfer(struct spi_master *master) 872 + { 873 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 874 + 875 + pm_runtime_put(tspi->dev); 876 + return 0; 877 + } 878 + 879 + static int tegra_slink_transfer_one_message(struct spi_master *master, 880 + struct spi_message *msg) 881 + { 882 + bool is_first_msg = true; 883 + int single_xfer; 884 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 885 + struct spi_transfer *xfer; 886 + struct spi_device *spi = msg->spi; 887 + int ret; 888 + 889 + msg->status = 0; 890 + msg->actual_length = 0; 891 + single_xfer = list_is_singular(&msg->transfers); 892 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 893 + INIT_COMPLETION(tspi->xfer_completion); 894 + ret = tegra_slink_start_transfer_one(spi, xfer, 895 + is_first_msg, single_xfer); 896 + if (ret < 0) { 897 + dev_err(tspi->dev, 898 + "spi can not start transfer, err %d\n", ret); 899 + goto exit; 900 + } 901 + is_first_msg = false; 902 + ret = wait_for_completion_timeout(&tspi->xfer_completion, 903 + 
SLINK_DMA_TIMEOUT); 904 + if (WARN_ON(ret == 0)) { 905 + dev_err(tspi->dev, 906 + "spi transfer timeout, err %d\n", ret); 907 + ret = -EIO; 908 + goto exit; 909 + } 910 + 911 + if (tspi->tx_status || tspi->rx_status) { 912 + dev_err(tspi->dev, "Error in Transfer\n"); 913 + ret = -EIO; 914 + goto exit; 915 + } 916 + msg->actual_length += xfer->len; 917 + if (xfer->cs_change && xfer->delay_usecs) { 918 + tegra_slink_writel(tspi, tspi->def_command_reg, 919 + SLINK_COMMAND); 920 + udelay(xfer->delay_usecs); 921 + } 922 + } 923 + ret = 0; 924 + exit: 925 + tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 926 + tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); 927 + msg->status = ret; 928 + spi_finalize_current_message(master); 929 + return ret; 930 + } 931 + 932 + static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi) 933 + { 934 + struct spi_transfer *t = tspi->curr_xfer; 935 + unsigned long flags; 936 + 937 + spin_lock_irqsave(&tspi->lock, flags); 938 + if (tspi->tx_status || tspi->rx_status || 939 + (tspi->status_reg & SLINK_BSY)) { 940 + dev_err(tspi->dev, 941 + "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg); 942 + dev_err(tspi->dev, 943 + "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg, 944 + tspi->command2_reg, tspi->dma_control_reg); 945 + tegra_periph_reset_assert(tspi->clk); 946 + udelay(2); 947 + tegra_periph_reset_deassert(tspi->clk); 948 + complete(&tspi->xfer_completion); 949 + goto exit; 950 + } 951 + 952 + if (tspi->cur_direction & DATA_DIR_RX) 953 + tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t); 954 + 955 + if (tspi->cur_direction & DATA_DIR_TX) 956 + tspi->cur_pos = tspi->cur_tx_pos; 957 + else 958 + tspi->cur_pos = tspi->cur_rx_pos; 959 + 960 + if (tspi->cur_pos == t->len) { 961 + complete(&tspi->xfer_completion); 962 + goto exit; 963 + } 964 + 965 + tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t); 966 + tegra_slink_start_cpu_based_transfer(tspi, t); 967 + exit: 968 +
spin_unlock_irqrestore(&tspi->lock, flags); 969 + return IRQ_HANDLED; 970 + } 971 + 972 + static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi) 973 + { 974 + struct spi_transfer *t = tspi->curr_xfer; 975 + long wait_status; 976 + int err = 0; 977 + unsigned total_fifo_words; 978 + unsigned long flags; 979 + 980 + /* Abort dmas if any error */ 981 + if (tspi->cur_direction & DATA_DIR_TX) { 982 + if (tspi->tx_status) { 983 + dmaengine_terminate_all(tspi->tx_dma_chan); 984 + err += 1; 985 + } else { 986 + wait_status = wait_for_completion_interruptible_timeout( 987 + &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT); 988 + if (wait_status <= 0) { 989 + dmaengine_terminate_all(tspi->tx_dma_chan); 990 + dev_err(tspi->dev, "TxDma Xfer failed\n"); 991 + err += 1; 992 + } 993 + } 994 + } 995 + 996 + if (tspi->cur_direction & DATA_DIR_RX) { 997 + if (tspi->rx_status) { 998 + dmaengine_terminate_all(tspi->rx_dma_chan); 999 + err += 2; 1000 + } else { 1001 + wait_status = wait_for_completion_interruptible_timeout( 1002 + &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT); 1003 + if (wait_status <= 0) { 1004 + dmaengine_terminate_all(tspi->rx_dma_chan); 1005 + dev_err(tspi->dev, "RxDma Xfer failed\n"); 1006 + err += 2; 1007 + } 1008 + } 1009 + } 1010 + 1011 + spin_lock_irqsave(&tspi->lock, flags); 1012 + if (err) { 1013 + dev_err(tspi->dev, 1014 + "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg); 1015 + dev_err(tspi->dev, 1016 + "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg, 1017 + tspi->command2_reg, tspi->dma_control_reg); 1018 + tegra_periph_reset_assert(tspi->clk); 1019 + udelay(2); 1020 + tegra_periph_reset_deassert(tspi->clk); 1021 + complete(&tspi->xfer_completion); 1022 + spin_unlock_irqrestore(&tspi->lock, flags); 1023 + return IRQ_HANDLED; 1024 + } 1025 + 1026 + if (tspi->cur_direction & DATA_DIR_RX) 1027 + tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t); 1028 + 1029 + if (tspi->cur_direction & DATA_DIR_TX) 1030 + tspi->cur_pos = tspi->cur_tx_pos; 1031 
+ else 1032 + tspi->cur_pos = tspi->cur_rx_pos; 1033 + 1034 + if (tspi->cur_pos == t->len) { 1035 + complete(&tspi->xfer_completion); 1036 + goto exit; 1037 + } 1038 + 1039 + /* Continue transfer in current message */ 1040 + total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, 1041 + tspi, t); 1042 + if (total_fifo_words > SLINK_FIFO_DEPTH) 1043 + err = tegra_slink_start_dma_based_transfer(tspi, t); 1044 + else 1045 + err = tegra_slink_start_cpu_based_transfer(tspi, t); 1046 + 1047 + exit: 1048 + spin_unlock_irqrestore(&tspi->lock, flags); 1049 + return IRQ_HANDLED; 1050 + } 1051 + 1052 + static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data) 1053 + { 1054 + struct tegra_slink_data *tspi = context_data; 1055 + 1056 + if (!tspi->is_curr_dma_xfer) 1057 + return handle_cpu_based_xfer(tspi); 1058 + return handle_dma_based_xfer(tspi); 1059 + } 1060 + 1061 + static irqreturn_t tegra_slink_isr(int irq, void *context_data) 1062 + { 1063 + struct tegra_slink_data *tspi = context_data; 1064 + 1065 + tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS); 1066 + if (tspi->cur_direction & DATA_DIR_TX) 1067 + tspi->tx_status = tspi->status_reg & 1068 + (SLINK_TX_OVF | SLINK_TX_UNF); 1069 + 1070 + if (tspi->cur_direction & DATA_DIR_RX) 1071 + tspi->rx_status = tspi->status_reg & 1072 + (SLINK_RX_OVF | SLINK_RX_UNF); 1073 + tegra_slink_clear_status(tspi); 1074 + 1075 + return IRQ_WAKE_THREAD; 1076 + } 1077 + 1078 + static struct tegra_spi_platform_data *tegra_slink_parse_dt( 1079 + struct platform_device *pdev) 1080 + { 1081 + struct tegra_spi_platform_data *pdata; 1082 + const unsigned int *prop; 1083 + struct device_node *np = pdev->dev.of_node; 1084 + u32 of_dma[2]; 1085 + 1086 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1087 + if (!pdata) { 1088 + dev_err(&pdev->dev, "Memory alloc for pdata failed\n"); 1089 + return NULL; 1090 + } 1091 + 1092 + if (of_property_read_u32_array(np, "nvidia,dma-request-selector", 1093 + 
of_dma, 2) >= 0) 1094 + pdata->dma_req_sel = of_dma[1]; 1095 + 1096 + prop = of_get_property(np, "spi-max-frequency", NULL); 1097 + if (prop) 1098 + pdata->spi_max_frequency = be32_to_cpup(prop); 1099 + 1100 + return pdata; 1101 + } 1102 + 1103 + const struct tegra_slink_chip_data tegra30_spi_cdata = { 1104 + .cs_hold_time = true, 1105 + }; 1106 + 1107 + const struct tegra_slink_chip_data tegra20_spi_cdata = { 1108 + .cs_hold_time = false, 1109 + }; 1110 + 1111 + static struct of_device_id tegra_slink_of_match[] __devinitconst = { 1112 + { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, }, 1113 + { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, }, 1114 + {} 1115 + }; 1116 + MODULE_DEVICE_TABLE(of, tegra_slink_of_match); 1117 + 1118 + static int __devinit tegra_slink_probe(struct platform_device *pdev) 1119 + { 1120 + struct spi_master *master; 1121 + struct tegra_slink_data *tspi; 1122 + struct resource *r; 1123 + struct tegra_spi_platform_data *pdata = pdev->dev.platform_data; 1124 + int ret, spi_irq; 1125 + const struct tegra_slink_chip_data *cdata = NULL; 1126 + const struct of_device_id *match; 1127 + 1128 + match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev); 1129 + if (!match) { 1130 + dev_err(&pdev->dev, "Error: No device match found\n"); 1131 + return -ENODEV; 1132 + } 1133 + cdata = match->data; 1134 + if (!pdata && pdev->dev.of_node) 1135 + pdata = tegra_slink_parse_dt(pdev); 1136 + 1137 + if (!pdata) { 1138 + dev_err(&pdev->dev, "No platform data, exiting\n"); 1139 + return -ENODEV; 1140 + } 1141 + 1142 + if (!pdata->spi_max_frequency) 1143 + pdata->spi_max_frequency = 25000000; /* 25MHz */ 1144 + 1145 + master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); 1146 + if (!master) { 1147 + dev_err(&pdev->dev, "master allocation failed\n"); 1148 + return -ENOMEM; 1149 + } 1150 + 1151 + /* the spi->mode bits understood by this driver: */ 1152 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1153 
+ master->setup = tegra_slink_setup; 1154 + master->prepare_transfer_hardware = tegra_slink_prepare_transfer; 1155 + master->transfer_one_message = tegra_slink_transfer_one_message; 1156 + master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; 1157 + master->num_chipselect = MAX_CHIP_SELECT; 1158 + master->bus_num = -1; 1159 + 1160 + dev_set_drvdata(&pdev->dev, master); 1161 + tspi = spi_master_get_devdata(master); 1162 + tspi->master = master; 1163 + tspi->dma_req_sel = pdata->dma_req_sel; 1164 + tspi->dev = &pdev->dev; 1165 + tspi->chip_data = cdata; 1166 + spin_lock_init(&tspi->lock); 1167 + 1168 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1169 + if (!r) { 1170 + dev_err(&pdev->dev, "No IO memory resource\n"); 1171 + ret = -ENODEV; 1172 + goto exit_free_master; 1173 + } 1174 + tspi->phys = r->start; 1175 + tspi->base = devm_request_and_ioremap(&pdev->dev, r); 1176 + if (!tspi->base) { 1177 + dev_err(&pdev->dev, 1178 + "Cannot request memregion/iomap dma address\n"); 1179 + ret = -EADDRNOTAVAIL; 1180 + goto exit_free_master; 1181 + } 1182 + 1183 + spi_irq = platform_get_irq(pdev, 0); 1184 + tspi->irq = spi_irq; 1185 + ret = request_threaded_irq(tspi->irq, tegra_slink_isr, 1186 + tegra_slink_isr_thread, IRQF_ONESHOT, 1187 + dev_name(&pdev->dev), tspi); 1188 + if (ret < 0) { 1189 + dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", 1190 + tspi->irq); 1191 + goto exit_free_master; 1192 + } 1193 + 1194 + tspi->clk = devm_clk_get(&pdev->dev, "slink"); 1195 + if (IS_ERR(tspi->clk)) { 1196 + dev_err(&pdev->dev, "can not get clock\n"); 1197 + ret = PTR_ERR(tspi->clk); 1198 + goto exit_free_irq; 1199 + } 1200 + 1201 + tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; 1202 + tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; 1203 + tspi->spi_max_frequency = pdata->spi_max_frequency; 1204 + 1205 + if (pdata->dma_req_sel) { 1206 + ret = tegra_slink_init_dma_param(tspi, true); 1207 + if (ret < 0) { 1208 + dev_err(&pdev->dev, "RxDma Init failed, err 
%d\n", ret); 1209 + goto exit_free_irq; 1210 + } 1211 + 1212 + ret = tegra_slink_init_dma_param(tspi, false); 1213 + if (ret < 0) { 1214 + dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret); 1215 + goto exit_rx_dma_free; 1216 + } 1217 + tspi->max_buf_size = tspi->dma_buf_size; 1218 + init_completion(&tspi->tx_dma_complete); 1219 + init_completion(&tspi->rx_dma_complete); 1220 + } 1221 + 1222 + init_completion(&tspi->xfer_completion); 1223 + 1224 + pm_runtime_enable(&pdev->dev); 1225 + if (!pm_runtime_enabled(&pdev->dev)) { 1226 + ret = tegra_slink_runtime_resume(&pdev->dev); 1227 + if (ret) 1228 + goto exit_pm_disable; 1229 + } 1230 + 1231 + ret = pm_runtime_get_sync(&pdev->dev); 1232 + if (ret < 0) { 1233 + dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); 1234 + goto exit_pm_disable; 1235 + } 1236 + tspi->def_command_reg = SLINK_M_S; 1237 + tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN; 1238 + tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 1239 + tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); 1240 + pm_runtime_put(&pdev->dev); 1241 + 1242 + master->dev.of_node = pdev->dev.of_node; 1243 + ret = spi_register_master(master); 1244 + if (ret < 0) { 1245 + dev_err(&pdev->dev, "can not register to master err %d\n", ret); 1246 + goto exit_pm_disable; 1247 + } 1248 + return ret; 1249 + 1250 + exit_pm_disable: 1251 + pm_runtime_disable(&pdev->dev); 1252 + if (!pm_runtime_status_suspended(&pdev->dev)) 1253 + tegra_slink_runtime_suspend(&pdev->dev); 1254 + tegra_slink_deinit_dma_param(tspi, false); 1255 + exit_rx_dma_free: 1256 + tegra_slink_deinit_dma_param(tspi, true); 1257 + exit_free_irq: 1258 + free_irq(spi_irq, tspi); 1259 + exit_free_master: 1260 + spi_master_put(master); 1261 + return ret; 1262 + } 1263 + 1264 + static int __devexit tegra_slink_remove(struct platform_device *pdev) 1265 + { 1266 + struct spi_master *master = dev_get_drvdata(&pdev->dev); 1267 + struct tegra_slink_data *tspi = 
spi_master_get_devdata(master); 1268 + 1269 + free_irq(tspi->irq, tspi); 1270 + spi_unregister_master(master); 1271 + 1272 + if (tspi->tx_dma_chan) 1273 + tegra_slink_deinit_dma_param(tspi, false); 1274 + 1275 + if (tspi->rx_dma_chan) 1276 + tegra_slink_deinit_dma_param(tspi, true); 1277 + 1278 + pm_runtime_disable(&pdev->dev); 1279 + if (!pm_runtime_status_suspended(&pdev->dev)) 1280 + tegra_slink_runtime_suspend(&pdev->dev); 1281 + 1282 + return 0; 1283 + } 1284 + 1285 + #ifdef CONFIG_PM_SLEEP 1286 + static int tegra_slink_suspend(struct device *dev) 1287 + { 1288 + struct spi_master *master = dev_get_drvdata(dev); 1289 + 1290 + return spi_master_suspend(master); 1291 + } 1292 + 1293 + static int tegra_slink_resume(struct device *dev) 1294 + { 1295 + struct spi_master *master = dev_get_drvdata(dev); 1296 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 1297 + int ret; 1298 + 1299 + ret = pm_runtime_get_sync(dev); 1300 + if (ret < 0) { 1301 + dev_err(dev, "pm runtime failed, e = %d\n", ret); 1302 + return ret; 1303 + } 1304 + tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND); 1305 + tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2); 1306 + pm_runtime_put(dev); 1307 + 1308 + return spi_master_resume(master); 1309 + } 1310 + #endif 1311 + 1312 + static int tegra_slink_runtime_suspend(struct device *dev) 1313 + { 1314 + struct spi_master *master = dev_get_drvdata(dev); 1315 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 1316 + 1317 + /* Flush all write which are in PPSB queue by reading back */ 1318 + tegra_slink_readl(tspi, SLINK_MAS_DATA); 1319 + 1320 + clk_disable_unprepare(tspi->clk); 1321 + return 0; 1322 + } 1323 + 1324 + static int tegra_slink_runtime_resume(struct device *dev) 1325 + { 1326 + struct spi_master *master = dev_get_drvdata(dev); 1327 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 1328 + int ret; 1329 + 1330 + ret = clk_prepare_enable(tspi->clk); 1331 + if (ret < 0) { 1332 
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret); 1333 + return ret; 1334 + } 1335 + return 0; 1336 + } 1337 + 1338 + static const struct dev_pm_ops slink_pm_ops = { 1339 + SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend, 1340 + tegra_slink_runtime_resume, NULL) 1341 + SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume) 1342 + }; 1343 + static struct platform_driver tegra_slink_driver = { 1344 + .driver = { 1345 + .name = "spi-tegra-slink", 1346 + .owner = THIS_MODULE, 1347 + .pm = &slink_pm_ops, 1348 + .of_match_table = of_match_ptr(tegra_slink_of_match), 1349 + }, 1350 + .probe = tegra_slink_probe, 1351 + .remove = __devexit_p(tegra_slink_remove), 1352 + }; 1353 + module_platform_driver(tegra_slink_driver); 1354 + 1355 + MODULE_ALIAS("platform:spi-tegra-slink"); 1356 + MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver"); 1357 + MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); 1358 + MODULE_LICENSE("GPL v2");
+13 -3
drivers/spi/spi.c
··· 1204 1204 int spi_setup(struct spi_device *spi) 1205 1205 { 1206 1206 unsigned bad_bits; 1207 - int status; 1207 + int status = 0; 1208 1208 1209 1209 /* help drivers fail *cleanly* when they need options 1210 1210 * that aren't supported with their current master ··· 1219 1219 if (!spi->bits_per_word) 1220 1220 spi->bits_per_word = 8; 1221 1221 1222 - status = spi->master->setup(spi); 1222 + if (spi->master->setup) 1223 + status = spi->master->setup(spi); 1223 1224 1224 1225 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" 1225 1226 "%u bits/w, %u Hz max --> %d\n", ··· 1239 1238 static int __spi_async(struct spi_device *spi, struct spi_message *message) 1240 1239 { 1241 1240 struct spi_master *master = spi->master; 1241 + struct spi_transfer *xfer; 1242 1242 1243 1243 /* Half-duplex links include original MicroWire, and ones with 1244 1244 * only one data pin like SPI_3WIRE (switches direction) or where ··· 1248 1246 */ 1249 1247 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 1250 1248 || (spi->mode & SPI_3WIRE)) { 1251 - struct spi_transfer *xfer; 1252 1249 unsigned flags = master->flags; 1253 1250 1254 1251 list_for_each_entry(xfer, &message->transfers, transfer_list) { ··· 1258 1257 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 1259 1258 return -EINVAL; 1260 1259 } 1260 + } 1261 + 1262 + /** 1263 + * Set transfer bits_per_word as spi device default if it is not 1264 + * set for this transfer. 1265 + */ 1266 + list_for_each_entry(xfer, &message->transfers, transfer_list) { 1267 + if (!xfer->bits_per_word) 1268 + xfer->bits_per_word = spi->bits_per_word; 1261 1269 } 1262 1270 1263 1271 message->spi = spi;
+10
drivers/spi/spidev.c
··· 31 31 #include <linux/mutex.h> 32 32 #include <linux/slab.h> 33 33 #include <linux/compat.h> 34 + #include <linux/of.h> 35 + #include <linux/of_device.h> 34 36 35 37 #include <linux/spi/spi.h> 36 38 #include <linux/spi/spidev.h> ··· 644 642 return 0; 645 643 } 646 644 645 + static const struct of_device_id spidev_dt_ids[] = { 646 + { .compatible = "rohm,dh2228fv" }, 647 + {}, 648 + }; 649 + 650 + MODULE_DEVICE_TABLE(of, spidev_dt_ids); 651 + 647 652 static struct spi_driver spidev_spi_driver = { 648 653 .driver = { 649 654 .name = "spidev", 650 655 .owner = THIS_MODULE, 656 + .of_match_table = of_match_ptr(spidev_dt_ids), 651 657 }, 652 658 .probe = spidev_probe, 653 659 .remove = __devexit_p(spidev_remove),
+4
include/linux/platform_data/spi-omap2-mcspi.h
··· 7 7 8 8 #define OMAP4_MCSPI_REG_OFFSET 0x100 9 9 10 + #define MCSPI_PINDIR_D0_IN_D1_OUT 0 11 + #define MCSPI_PINDIR_D0_OUT_D1_IN 1 12 + 10 13 struct omap2_mcspi_platform_config { 11 14 unsigned short num_cs; 12 15 unsigned int regs_offset; 16 + unsigned int pin_dir:1; 13 17 }; 14 18 15 19 struct omap2_mcspi_dev_attr {
+40
include/linux/spi/spi-tegra.h
··· 1 + /* 2 + * spi-tegra.h: SPI interface for Nvidia Tegra20 SLINK controller. 3 + * 4 + * Copyright (C) 2011 NVIDIA Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along 17 + * with this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 19 + */ 20 + 21 + #ifndef _LINUX_SPI_TEGRA_H 22 + #define _LINUX_SPI_TEGRA_H 23 + 24 + struct tegra_spi_platform_data { 25 + int dma_req_sel; 26 + unsigned int spi_max_frequency; 27 + }; 28 + 29 + /* 30 + * Controller data from device to pass some info like 31 + * hw based chip select can be used or not and if yes 32 + * then CS hold and setup time. 33 + */ 34 + struct tegra_spi_device_controller_data { 35 + bool is_hw_based_cs; 36 + int cs_setup_clk_count; 37 + int cs_hold_clk_count; 38 + }; 39 + 40 + #endif /* _LINUX_SPI_TEGRA_H */