Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'spi-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
"As well as the usual driver updates and cleanups there's a few
improvements to the core here:

- The start of some improvements to factor out more of the SPI
message loop into the core. Right now this is just simplifying the
code a bit but hopefully next time around we'll also have managed
to roll out some noticeable performance improvements which drivers
can take advantage of.
- Support for loading modules for ACPI enumerated SPI devices.
- Managed registration for SPI controllers.
- Helper for another common I/O pattern"

* tag 'spi-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (116 commits)
spi/hspi: add device tree support
spi: atmel: fix return value check in atmel_spi_probe()
spi: spi-imx: only enable the clocks when we start to transfer a message
spi/s3c64xx: Fix doubled clock disable on suspend
spi/s3c64xx: Do not ignore return value of spi_master_resume/suspend
spi: spi-mxs: Use u32 instead of uint32_t
spi: spi-mxs: Don't set clock for each xfer
spi: spi-mxs: Clean up setup_transfer function
spi: spi-mxs: Remove check of spi mode bits
spi: spi-mxs: Fix race in setup method
spi: spi-mxs: Remove bogus setting of ssp clk rate field
spi: spi-mxs: Remove full duplex check, spi core already does it
spi: spi-mxs: Fix chip select control bits in DMA mode
spi: spi-mxs: Fix extra CS pulses and read mode in multi-transfer messages
spi: spi-mxs: Change flag arguments in txrx functions to bit flags
spi: spi-mxs: Always clear INGORE_CRC, to keep CS asserted
spi: spi-mxs: Remove mxs_spi_enable and mxs_spi_disable
spi: spi-mxs: Always set LOCK_CS
spi/s3c64xx: Add missing pm_runtime_put on setup fail
spi/s3c64xx: Add missing pm_runtime_set_active() call in probe()
...

+1281 -715
+7
Documentation/devicetree/bindings/spi/sh-hspi.txt
··· 1 + Renesas HSPI. 2 + 3 + Required properties: 4 + - compatible : "renesas,hspi" 5 + - reg : Offset and length of the register set for the device 6 + - interrupts : interrupt line used by HSPI 7 +
+3
Documentation/driver-model/devres.txt
··· 303 303 304 304 SLAVE DMA ENGINE 305 305 devm_acpi_dma_controller_register() 306 + 307 + SPI 308 + devm_spi_register_master()
+1 -6
drivers/hwmon/adt7310.c
··· 42 42 static int adt7310_spi_read_word(struct device *dev, u8 reg) 43 43 { 44 44 struct spi_device *spi = to_spi_device(dev); 45 - int ret; 46 45 47 - ret = spi_w8r16(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ); 48 - if (ret < 0) 49 - return ret; 50 - 51 - return be16_to_cpu((__force __be16)ret); 46 + return spi_w8r16be(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ); 52 47 } 53 48 54 49 static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
+3 -2
drivers/spi/Kconfig
··· 264 264 config SPI_FSL_DSPI 265 265 tristate "Freescale DSPI controller" 266 266 select SPI_BITBANG 267 + depends on SOC_VF610 || COMPILE_TEST 267 268 help 268 269 This enables support for the Freescale DSPI controller in master 269 270 mode. VF610 platform uses the controller. ··· 370 369 371 370 config SPI_RSPI 372 371 tristate "Renesas RSPI controller" 373 - depends on SUPERH && SH_DMAE_BASE 372 + depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE 374 373 help 375 374 SPI driver for Renesas RSPI blocks. 376 375 ··· 394 393 395 394 config SPI_S3C64XX 396 395 tristate "Samsung S3C64XX series type SPI" 397 - depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS) 396 + depends on PLAT_SAMSUNG 398 397 select S3C64XX_DMA if ARCH_S3C64XX 399 398 help 400 399 SPI driver for Samsung S3C64XX and newer SoCs.
+1 -1
drivers/spi/spi-altera.c
··· 219 219 platform_set_drvdata(pdev, hw); 220 220 221 221 /* setup the state for the bitbang driver */ 222 - hw->bitbang.master = spi_master_get(master); 222 + hw->bitbang.master = master; 223 223 if (!hw->bitbang.master) 224 224 return err; 225 225 hw->bitbang.chipselect = altera_spi_chipsel;
+1 -1
drivers/spi/spi-ath79.c
··· 231 231 master->num_chipselect = pdata->num_chipselect; 232 232 } 233 233 234 - sp->bitbang.master = spi_master_get(master); 234 + sp->bitbang.master = master; 235 235 sp->bitbang.chipselect = ath79_spi_chipselect; 236 236 sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; 237 237 sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+25 -25
drivers/spi/spi-atmel.c
··· 170 170 /* Bit manipulation macros */ 171 171 #define SPI_BIT(name) \ 172 172 (1 << SPI_##name##_OFFSET) 173 - #define SPI_BF(name,value) \ 173 + #define SPI_BF(name, value) \ 174 174 (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET) 175 - #define SPI_BFEXT(name,value) \ 175 + #define SPI_BFEXT(name, value) \ 176 176 (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1)) 177 - #define SPI_BFINS(name,value,old) \ 178 - ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \ 179 - | SPI_BF(name,value)) 177 + #define SPI_BFINS(name, value, old) \ 178 + (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \ 179 + | SPI_BF(name, value)) 180 180 181 181 /* Register access macros */ 182 - #define spi_readl(port,reg) \ 182 + #define spi_readl(port, reg) \ 183 183 __raw_readl((port)->regs + SPI_##reg) 184 - #define spi_writel(port,reg,value) \ 184 + #define spi_writel(port, reg, value) \ 185 185 __raw_writel((value), (port)->regs + SPI_##reg) 186 186 187 187 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and ··· 1401 1401 asd = spi->controller_state; 1402 1402 bits = (asd->csr >> 4) & 0xf; 1403 1403 if (bits != xfer->bits_per_word - 8) { 1404 - dev_dbg(&spi->dev, "you can't yet change " 1405 - "bits_per_word in transfers\n"); 1404 + dev_dbg(&spi->dev, 1405 + "you can't yet change bits_per_word in transfers\n"); 1406 1406 return -ENOPROTOOPT; 1407 1407 } 1408 1408 } ··· 1516 1516 1517 1517 /* setup spi core then atmel-specific driver state */ 1518 1518 ret = -ENOMEM; 1519 - master = spi_alloc_master(&pdev->dev, sizeof *as); 1519 + master = spi_alloc_master(&pdev->dev, sizeof(*as)); 1520 1520 if (!master) 1521 1521 goto out_free; 1522 1522 ··· 1546 1546 INIT_LIST_HEAD(&as->queue); 1547 1547 1548 1548 as->pdev = pdev; 1549 - as->regs = ioremap(regs->start, resource_size(regs)); 1550 - if (!as->regs) 1549 + as->regs = devm_ioremap_resource(&pdev->dev, regs); 1550 + if (IS_ERR(as->regs)) { 
1551 + ret = PTR_ERR(as->regs); 1551 1552 goto out_free_buffer; 1553 + } 1552 1554 as->phybase = regs->start; 1553 1555 as->irq = irq; 1554 1556 as->clk = clk; ··· 1619 1617 out_free_irq: 1620 1618 free_irq(irq, master); 1621 1619 out_unmap_regs: 1622 - iounmap(as->regs); 1623 1620 out_free_buffer: 1624 1621 if (!as->use_pdc) 1625 1622 tasklet_kill(&as->tasklet); ··· 1670 1669 clk_disable_unprepare(as->clk); 1671 1670 clk_put(as->clk); 1672 1671 free_irq(as->irq, master); 1673 - iounmap(as->regs); 1674 1672 1675 1673 spi_unregister_master(master); 1676 1674 1677 1675 return 0; 1678 1676 } 1679 1677 1680 - #ifdef CONFIG_PM 1681 - 1682 - static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg) 1678 + #ifdef CONFIG_PM_SLEEP 1679 + static int atmel_spi_suspend(struct device *dev) 1683 1680 { 1684 - struct spi_master *master = platform_get_drvdata(pdev); 1681 + struct spi_master *master = dev_get_drvdata(dev); 1685 1682 struct atmel_spi *as = spi_master_get_devdata(master); 1686 1683 1687 1684 clk_disable_unprepare(as->clk); 1688 1685 return 0; 1689 1686 } 1690 1687 1691 - static int atmel_spi_resume(struct platform_device *pdev) 1688 + static int atmel_spi_resume(struct device *dev) 1692 1689 { 1693 - struct spi_master *master = platform_get_drvdata(pdev); 1690 + struct spi_master *master = dev_get_drvdata(dev); 1694 1691 struct atmel_spi *as = spi_master_get_devdata(master); 1695 1692 1696 - return clk_prepare_enable(as->clk); 1693 + clk_prepare_enable(as->clk); 1697 1694 return 0; 1698 1695 } 1699 1696 1697 + static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); 1698 + 1699 + #define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops) 1700 1700 #else 1701 - #define atmel_spi_suspend NULL 1702 - #define atmel_spi_resume NULL 1701 + #define ATMEL_SPI_PM_OPS NULL 1703 1702 #endif 1704 1703 1705 1704 #if defined(CONFIG_OF) ··· 1715 1714 .driver = { 1716 1715 .name = "atmel_spi", 1717 1716 .owner = THIS_MODULE, 1717 + .pm = 
ATMEL_SPI_PM_OPS, 1718 1718 .of_match_table = of_match_ptr(atmel_spi_dt_ids), 1719 1719 }, 1720 - .suspend = atmel_spi_suspend, 1721 - .resume = atmel_spi_resume, 1722 1720 .probe = atmel_spi_probe, 1723 1721 .remove = atmel_spi_remove, 1724 1722 };
+3 -2
drivers/spi/spi-au1550.c
··· 775 775 776 776 hw = spi_master_get_devdata(master); 777 777 778 - hw->master = spi_master_get(master); 778 + hw->master = master; 779 779 hw->pdata = dev_get_platdata(&pdev->dev); 780 780 hw->dev = &pdev->dev; 781 781 ··· 985 985 MODULE_ALIAS("platform:au1550-spi"); 986 986 987 987 static struct platform_driver au1550_spi_drv = { 988 + .probe = au1550_spi_probe, 988 989 .remove = au1550_spi_remove, 989 990 .driver = { 990 991 .name = "au1550-spi", ··· 1005 1004 printk(KERN_ERR "au1550-spi: cannot add memory" 1006 1005 "dbdma device\n"); 1007 1006 } 1008 - return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); 1007 + return platform_driver_register(&au1550_spi_drv); 1009 1008 } 1010 1009 module_init(au1550_spi_init); 1011 1010
+1 -3
drivers/spi/spi-bcm2835.c
··· 358 358 bcm2835_wr(bs, BCM2835_SPI_CS, 359 359 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); 360 360 361 - err = spi_register_master(master); 361 + err = devm_spi_register_master(&pdev->dev, master); 362 362 if (err) { 363 363 dev_err(&pdev->dev, "could not register SPI master: %d\n", err); 364 364 goto out_free_irq; ··· 381 381 struct bcm2835_spi *bs = spi_master_get_devdata(master); 382 382 383 383 free_irq(bs->irq, master); 384 - spi_unregister_master(master); 385 384 386 385 /* Clear FIFOs, and disable the HW block */ 387 386 bcm2835_wr(bs, BCM2835_SPI_CS, 388 387 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); 389 388 390 389 clk_disable_unprepare(bs->clk); 391 - spi_master_put(master); 392 390 393 391 return 0; 394 392 }
+1 -5
drivers/spi/spi-bcm63xx.c
··· 412 412 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); 413 413 414 414 /* register and we are done */ 415 - ret = spi_register_master(master); 415 + ret = devm_spi_register_master(dev, master); 416 416 if (ret) { 417 417 dev_err(dev, "spi register failed\n"); 418 418 goto out_clk_disable; ··· 438 438 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 439 439 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 440 440 441 - spi_unregister_master(master); 442 - 443 441 /* reset spi block */ 444 442 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 445 443 446 444 /* HW shutdown */ 447 445 clk_disable_unprepare(bs->clk); 448 446 clk_put(bs->clk); 449 - 450 - spi_master_put(master); 451 447 452 448 return 0; 453 449 }
+15 -14
drivers/spi/spi-bfin-sport.c
··· 592 592 */ 593 593 if (chip_info->ctl_reg || chip_info->enable_dma) { 594 594 ret = -EINVAL; 595 - dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields"); 595 + dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n"); 596 596 goto error; 597 597 } 598 598 chip->cs_chg_udelay = chip_info->cs_chg_udelay; ··· 879 879 return 0; 880 880 } 881 881 882 - #ifdef CONFIG_PM 883 - static int 884 - bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state) 882 + #ifdef CONFIG_PM_SLEEP 883 + static int bfin_sport_spi_suspend(struct device *dev) 885 884 { 886 - struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); 885 + struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev); 887 886 int status; 888 887 889 888 status = bfin_sport_spi_stop_queue(drv_data); ··· 895 896 return status; 896 897 } 897 898 898 - static int 899 - bfin_sport_spi_resume(struct platform_device *pdev) 899 + static int bfin_sport_spi_resume(struct device *dev) 900 900 { 901 - struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); 901 + struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev); 902 902 int status; 903 903 904 904 /* Enable the SPI interface */ ··· 910 912 911 913 return status; 912 914 } 915 + 916 + static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend, 917 + bfin_sport_spi_resume); 918 + 919 + #define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops) 913 920 #else 914 - # define bfin_sport_spi_suspend NULL 915 - # define bfin_sport_spi_resume NULL 921 + #define BFIN_SPORT_SPI_PM_OPS NULL 916 922 #endif 917 923 918 924 static struct platform_driver bfin_sport_spi_driver = { 919 925 .driver = { 920 - .name = DRV_NAME, 921 - .owner = THIS_MODULE, 926 + .name = DRV_NAME, 927 + .owner = THIS_MODULE, 928 + .pm = BFIN_SPORT_SPI_PM_OPS, 922 929 }, 923 930 .probe = bfin_sport_spi_probe, 924 931 .remove = bfin_sport_spi_remove, 925 - .suspend = bfin_sport_spi_suspend, 926 - .resume = 
bfin_sport_spi_resume, 927 932 }; 928 933 module_platform_driver(bfin_sport_spi_driver);
+1 -2
drivers/spi/spi-bfin-v3.c
··· 867 867 tasklet_init(&drv_data->pump_transfers, 868 868 bfin_spi_pump_transfers, (unsigned long)drv_data); 869 869 /* register with the SPI framework */ 870 - ret = spi_register_master(master); 870 + ret = devm_spi_register_master(dev, master); 871 871 if (ret) { 872 872 dev_err(dev, "can not register spi master\n"); 873 873 goto err_free_peripheral; ··· 898 898 free_dma(drv_data->rx_dma); 899 899 free_dma(drv_data->tx_dma); 900 900 901 - spi_unregister_master(drv_data->master); 902 901 return 0; 903 902 } 904 903
+26 -22
drivers/spi/spi-bfin5xx.c
··· 524 524 timeout = jiffies + HZ; 525 525 while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) 526 526 if (!time_before(jiffies, timeout)) { 527 - dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); 527 + dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n"); 528 528 break; 529 529 } else 530 530 cpu_relax(); ··· 913 913 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 914 914 struct spi_transfer, transfer_list); 915 915 916 - dev_dbg(&drv_data->pdev->dev, "got a message to pump, " 917 - "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", 916 + dev_dbg(&drv_data->pdev->dev, 917 + "got a message to pump, state is set to: baud " 918 + "%d, flag 0x%x, ctl 0x%x\n", 918 919 drv_data->cur_chip->baud, drv_data->cur_chip->flag, 919 920 drv_data->cur_chip->ctl_reg); 920 921 ··· 1014 1013 * but let's assume (for now) they do. 1015 1014 */ 1016 1015 if (chip_info->ctl_reg & ~bfin_ctl_reg) { 1017 - dev_err(&spi->dev, "do not set bits in ctl_reg " 1018 - "that the SPI framework manages\n"); 1016 + dev_err(&spi->dev, 1017 + "do not set bits in ctl_reg that the SPI framework manages\n"); 1019 1018 goto error; 1020 1019 } 1021 1020 chip->enable_dma = chip_info->enable_dma != 0 ··· 1051 1050 chip->chip_select_num = spi->chip_select; 1052 1051 if (chip->chip_select_num < MAX_CTRL_CS) { 1053 1052 if (!(spi->mode & SPI_CPHA)) 1054 - dev_warn(&spi->dev, "Warning: SPI CPHA not set:" 1055 - " Slave Select not under software control!\n" 1056 - " See Documentation/blackfin/bfin-spi-notes.txt"); 1053 + dev_warn(&spi->dev, 1054 + "Warning: SPI CPHA not set: Slave Select not under software control!\n" 1055 + "See Documentation/blackfin/bfin-spi-notes.txt\n"); 1057 1056 1058 1057 chip->flag = (1 << spi->chip_select) << 8; 1059 1058 } else 1060 1059 chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; 1061 1060 1062 1061 if (chip->enable_dma && chip->pio_interrupt) { 1063 - dev_err(&spi->dev, "enable_dma is set, " 1064 - "do not set 
pio_interrupt\n"); 1062 + dev_err(&spi->dev, 1063 + "enable_dma is set, do not set pio_interrupt\n"); 1065 1064 goto error; 1066 1065 } 1067 1066 /* ··· 1411 1410 return 0; 1412 1411 } 1413 1412 1414 - #ifdef CONFIG_PM 1415 - static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) 1413 + #ifdef CONFIG_PM_SLEEP 1414 + static int bfin_spi_suspend(struct device *dev) 1416 1415 { 1417 - struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1416 + struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev); 1418 1417 int status = 0; 1419 1418 1420 1419 status = bfin_spi_stop_queue(drv_data); ··· 1433 1432 return 0; 1434 1433 } 1435 1434 1436 - static int bfin_spi_resume(struct platform_device *pdev) 1435 + static int bfin_spi_resume(struct device *dev) 1437 1436 { 1438 - struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1437 + struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev); 1439 1438 int status = 0; 1440 1439 1441 1440 bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg); ··· 1444 1443 /* Start the queue running */ 1445 1444 status = bfin_spi_start_queue(drv_data); 1446 1445 if (status != 0) { 1447 - dev_err(&pdev->dev, "problem starting queue (%d)\n", status); 1446 + dev_err(dev, "problem starting queue (%d)\n", status); 1448 1447 return status; 1449 1448 } 1450 1449 1451 1450 return 0; 1452 1451 } 1452 + 1453 + static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume); 1454 + 1455 + #define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops) 1453 1456 #else 1454 - #define bfin_spi_suspend NULL 1455 - #define bfin_spi_resume NULL 1456 - #endif /* CONFIG_PM */ 1457 + #define BFIN_SPI_PM_OPS NULL 1458 + #endif 1457 1459 1458 1460 MODULE_ALIAS("platform:bfin-spi"); 1459 1461 static struct platform_driver bfin_spi_driver = { 1460 1462 .driver = { 1461 1463 .name = DRV_NAME, 1462 1464 .owner = THIS_MODULE, 1465 + .pm = BFIN_SPI_PM_OPS, 1463 1466 }, 1464 - .suspend = bfin_spi_suspend, 1465 - .resume = 
bfin_spi_resume, 1467 + .probe = bfin_spi_probe, 1466 1468 .remove = bfin_spi_remove, 1467 1469 }; 1468 1470 1469 1471 static int __init bfin_spi_init(void) 1470 1472 { 1471 - return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); 1473 + return platform_driver_register(&bfin_spi_driver); 1472 1474 } 1473 1475 subsys_initcall(bfin_spi_init); 1474 1476
+18 -7
drivers/spi/spi-bitbang.c
··· 191 191 bitbang = spi_master_get_devdata(spi->master); 192 192 193 193 if (!cs) { 194 - cs = kzalloc(sizeof *cs, GFP_KERNEL); 194 + cs = kzalloc(sizeof(*cs), GFP_KERNEL); 195 195 if (!cs) 196 196 return -ENOMEM; 197 197 spi->controller_state = cs; ··· 258 258 259 259 static int spi_bitbang_prepare_hardware(struct spi_master *spi) 260 260 { 261 - struct spi_bitbang *bitbang; 261 + struct spi_bitbang *bitbang; 262 262 unsigned long flags; 263 263 264 264 bitbang = spi_master_get_devdata(spi); ··· 273 273 static int spi_bitbang_transfer_one(struct spi_master *master, 274 274 struct spi_message *m) 275 275 { 276 - struct spi_bitbang *bitbang; 276 + struct spi_bitbang *bitbang; 277 277 unsigned nsecs; 278 278 struct spi_transfer *t = NULL; 279 279 unsigned cs_change; ··· 292 292 cs_change = 1; 293 293 status = 0; 294 294 295 - list_for_each_entry (t, &m->transfers, transfer_list) { 295 + list_for_each_entry(t, &m->transfers, transfer_list) { 296 296 297 297 /* override speed or wordsize? */ 298 298 if (t->speed_hz || t->bits_per_word) ··· 349 349 if (t->delay_usecs) 350 350 udelay(t->delay_usecs); 351 351 352 - if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) { 352 + if (cs_change && 353 + !list_is_last(&t->transfer_list, &m->transfers)) { 353 354 /* sometimes a short mid-message deselect of the chip 354 355 * may be needed to terminate a mode or command 355 356 */ ··· 379 378 380 379 static int spi_bitbang_unprepare_hardware(struct spi_master *spi) 381 380 { 382 - struct spi_bitbang *bitbang; 381 + struct spi_bitbang *bitbang; 383 382 unsigned long flags; 384 383 385 384 bitbang = spi_master_get_devdata(spi); ··· 415 414 * This routine registers the spi_master, which will process requests in a 416 415 * dedicated task, keeping IRQs unblocked most of the time. To stop 417 416 * processing those requests, call spi_bitbang_stop(). 417 + * 418 + * On success, this routine will take a reference to master. 
The caller is 419 + * responsible for calling spi_bitbang_stop() to decrement the reference and 420 + * spi_master_put() as counterpart of spi_alloc_master() to prevent a memory 421 + * leak. 418 422 */ 419 423 int spi_bitbang_start(struct spi_bitbang *bitbang) 420 424 { 421 425 struct spi_master *master = bitbang->master; 426 + int ret; 422 427 423 428 if (!master || !bitbang->chipselect) 424 429 return -EINVAL; ··· 456 449 /* driver may get busy before register() returns, especially 457 450 * if someone registered boardinfo for devices 458 451 */ 459 - return spi_register_master(master); 452 + ret = spi_register_master(spi_master_get(master)); 453 + if (ret) 454 + spi_master_put(master); 455 + 456 + return 0; 460 457 } 461 458 EXPORT_SYMBOL_GPL(spi_bitbang_start); 462 459
+7 -8
drivers/spi/spi-butterfly.c
··· 147 147 148 148 /* we only needed to implement one mode here, and choose SPI_MODE_0 */ 149 149 150 - #define spidelay(X) do{}while(0) 151 - //#define spidelay ndelay 150 + #define spidelay(X) do { } while (0) 151 + /* #define spidelay ndelay */ 152 152 153 153 #include "spi-bitbang-txrx.h" 154 154 ··· 171 171 /* sector 0 = 8 pages * 264 bytes/page (1 block) 172 172 * sector 1 = 248 pages * 264 bytes/page 173 173 */ 174 - .name = "bookkeeping", // 66 KB 174 + .name = "bookkeeping", /* 66 KB */ 175 175 .offset = 0, 176 176 .size = (8 + 248) * 264, 177 - // .mask_flags = MTD_WRITEABLE, 177 + /* .mask_flags = MTD_WRITEABLE, */ 178 178 }, { 179 179 /* sector 2 = 256 pages * 264 bytes/page 180 180 * sectors 3-5 = 512 pages * 264 bytes/page 181 181 */ 182 - .name = "filesystem", // 462 KB 182 + .name = "filesystem", /* 462 KB */ 183 183 .offset = MTDPART_OFS_APPEND, 184 184 .size = MTDPART_SIZ_FULL, 185 185 } }; ··· 209 209 * and no way to be selective about what it binds to. 210 210 */ 211 211 212 - master = spi_alloc_master(dev, sizeof *pp); 212 + master = spi_alloc_master(dev, sizeof(*pp)); 213 213 if (!master) { 214 214 status = -ENOMEM; 215 215 goto done; ··· 225 225 master->bus_num = 42; 226 226 master->num_chipselect = 2; 227 227 228 - pp->bitbang.master = spi_master_get(master); 228 + pp->bitbang.master = master; 229 229 pp->bitbang.chipselect = butterfly_chipselect; 230 230 pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0; 231 231 ··· 289 289 pr_debug("%s: dataflash at %s\n", p->name, 290 290 dev_name(&pp->dataflash->dev)); 291 291 292 - // dev_info(_what?_, ...) 293 292 pr_info("%s: AVR Butterfly\n", p->name); 294 293 butterfly = pp; 295 294 return;
+2 -5
drivers/spi/spi-clps711x.c
··· 226 226 dev_name(&pdev->dev), hw); 227 227 if (ret) { 228 228 dev_err(&pdev->dev, "Can't request IRQ\n"); 229 - goto clk_out; 229 + goto err_out; 230 230 } 231 231 232 - ret = spi_register_master(master); 232 + ret = devm_spi_register_master(&pdev->dev, master); 233 233 if (!ret) { 234 234 dev_info(&pdev->dev, 235 235 "SPI bus driver initialized. Master clock %u Hz\n", ··· 239 239 240 240 dev_err(&pdev->dev, "Failed to register master\n"); 241 241 242 - clk_out: 243 242 err_out: 244 243 while (--i >= 0) 245 244 if (gpio_is_valid(hw->chipselect[i])) ··· 258 259 for (i = 0; i < master->num_chipselect; i++) 259 260 if (gpio_is_valid(hw->chipselect[i])) 260 261 gpio_free(hw->chipselect[i]); 261 - 262 - spi_unregister_master(master); 263 262 264 263 return 0; 265 264 }
+6 -7
drivers/spi/spi-davinci.c
··· 279 279 struct davinci_spi *dspi; 280 280 struct davinci_spi_config *spicfg; 281 281 u8 bits_per_word = 0; 282 - u32 hz = 0, spifmt = 0, prescale = 0; 282 + u32 hz = 0, spifmt = 0; 283 + int prescale; 283 284 284 285 dspi = spi_master_get_devdata(spi->master); 285 286 spicfg = (struct davinci_spi_config *)spi->controller_data; ··· 917 916 if (ret) 918 917 goto unmap_io; 919 918 920 - dspi->bitbang.master = spi_master_get(master); 919 + dspi->bitbang.master = master; 921 920 if (dspi->bitbang.master == NULL) { 922 921 ret = -ENODEV; 923 922 goto irq_free; ··· 926 925 dspi->clk = clk_get(&pdev->dev, NULL); 927 926 if (IS_ERR(dspi->clk)) { 928 927 ret = -ENODEV; 929 - goto put_master; 928 + goto irq_free; 930 929 } 931 930 clk_prepare_enable(dspi->clk); 932 931 ··· 1016 1015 free_clk: 1017 1016 clk_disable_unprepare(dspi->clk); 1018 1017 clk_put(dspi->clk); 1019 - put_master: 1020 - spi_master_put(master); 1021 1018 irq_free: 1022 1019 free_irq(dspi->irq, dspi); 1023 1020 unmap_io: ··· 1023 1024 release_region: 1024 1025 release_mem_region(dspi->pbase, resource_size(r)); 1025 1026 free_master: 1026 - kfree(master); 1027 + spi_master_put(master); 1027 1028 err: 1028 1029 return ret; 1029 1030 } ··· 1050 1051 1051 1052 clk_disable_unprepare(dspi->clk); 1052 1053 clk_put(dspi->clk); 1053 - spi_master_put(master); 1054 1054 free_irq(dspi->irq, dspi); 1055 1055 iounmap(dspi->base); 1056 1056 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1057 1057 release_mem_region(dspi->pbase, resource_size(r)); 1058 + spi_master_put(master); 1058 1059 1059 1060 return 0; 1060 1061 }
+1 -4
drivers/spi/spi-dw-mmio.c
··· 74 74 dwsmmio->clk = clk_get(&pdev->dev, NULL); 75 75 if (IS_ERR(dwsmmio->clk)) { 76 76 ret = PTR_ERR(dwsmmio->clk); 77 - goto err_irq; 77 + goto err_unmap; 78 78 } 79 79 clk_enable(dwsmmio->clk); 80 80 ··· 94 94 clk_disable(dwsmmio->clk); 95 95 clk_put(dwsmmio->clk); 96 96 dwsmmio->clk = NULL; 97 - err_irq: 98 - free_irq(dws->irq, dws); 99 97 err_unmap: 100 98 iounmap(dws->regs); 101 99 err_release_reg: ··· 113 115 clk_put(dwsmmio->clk); 114 116 dwsmmio->clk = NULL; 115 117 116 - free_irq(dwsmmio->dws.irq, &dwsmmio->dws); 117 118 dw_spi_remove_host(&dwsmmio->dws); 118 119 iounmap(dwsmmio->dws.regs); 119 120 kfree(dwsmmio);
+1 -2
drivers/spi/spi-dw-pci.c
··· 40 40 int pci_bar = 0; 41 41 int ret; 42 42 43 - printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n", 43 + dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n", 44 44 pdev->vendor, pdev->device); 45 45 46 46 ret = pci_enable_device(pdev); ··· 109 109 { 110 110 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); 111 111 112 - pci_set_drvdata(pdev, NULL); 113 112 dw_spi_remove_host(&dwpci->dws); 114 113 iounmap(dwpci->dws.regs); 115 114 pci_release_region(pdev, 0);
+2 -2
drivers/spi/spi-dw.c
··· 870 870 /* Remove the queue */ 871 871 status = destroy_queue(dws); 872 872 if (status != 0) 873 - dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " 874 - "complete, message memory not freed\n"); 873 + dev_err(&dws->master->dev, 874 + "dw_spi_remove: workqueue will not complete, message memory not freed\n"); 875 875 876 876 if (dws->dma_ops && dws->dma_ops->dma_exit) 877 877 dws->dma_ops->dma_exit(dws);
+4 -8
drivers/spi/spi-efm32.c
··· 280 280 return IRQ_HANDLED; 281 281 } 282 282 283 - static const struct efm32_spi_pdata efm32_spi_pdata_default = { 284 - .location = 1, 285 - }; 286 - 287 283 static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata) 288 284 { 289 285 u32 reg = efm32_spi_read32(ddata, REG_ROUTE); ··· 343 347 344 348 ddata = spi_master_get_devdata(master); 345 349 346 - ddata->bitbang.master = spi_master_get(master); 350 + ddata->bitbang.master = master; 347 351 ddata->bitbang.chipselect = efm32_spi_chipselect; 348 352 ddata->bitbang.setup_transfer = efm32_spi_setup_transfer; 349 353 ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs; ··· 383 387 goto err; 384 388 } 385 389 386 - if (resource_size(res) < 60) { 390 + if (resource_size(res) < 0x60) { 387 391 ret = -EINVAL; 388 392 dev_err(&pdev->dev, "memory resource too small\n"); 389 393 goto err; ··· 463 467 clk_disable_unprepare(ddata->clk); 464 468 err: 465 469 spi_master_put(master); 466 - kfree(master); 467 470 } 468 471 469 472 return ret; ··· 473 478 struct spi_master *master = platform_get_drvdata(pdev); 474 479 struct efm32_spi_ddata *ddata = spi_master_get_devdata(master); 475 480 481 + spi_bitbang_stop(&ddata->bitbang); 482 + 476 483 efm32_spi_write32(ddata, 0, REG_IEN); 477 484 478 485 free_irq(ddata->txirq, ddata); 479 486 free_irq(ddata->rxirq, ddata); 480 487 clk_disable_unprepare(ddata->clk); 481 488 spi_master_put(master); 482 - kfree(master); 483 489 484 490 return 0; 485 491 }
+3 -4
drivers/spi/spi-ep93xx.c
··· 330 330 331 331 dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", 332 332 chip->spi->mode, div_cpsr, div_scr, dss); 333 - dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); 333 + dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0); 334 334 335 335 ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr); 336 336 ep93xx_spi_write_u16(espi, SSPCR0, cr0); ··· 509 509 } 510 510 511 511 if (WARN_ON(len)) { 512 - dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len); 512 + dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len); 513 513 return ERR_PTR(-EINVAL); 514 514 } 515 515 ··· 942 942 /* make sure that the hardware is disabled */ 943 943 ep93xx_spi_write_u8(espi, SSPCR1, 0); 944 944 945 - error = spi_register_master(master); 945 + error = devm_spi_register_master(&pdev->dev, master); 946 946 if (error) { 947 947 dev_err(&pdev->dev, "failed to register SPI master\n"); 948 948 goto fail_free_dma; ··· 968 968 969 969 ep93xx_spi_release_dma(espi); 970 970 971 - spi_unregister_master(master); 972 971 return 0; 973 972 } 974 973
+1 -1
drivers/spi/spi-fsl-cpm.c
··· 299 299 300 300 switch (mspi->subblock) { 301 301 default: 302 - dev_warn(dev, "cell-index unspecified, assuming SPI1"); 302 + dev_warn(dev, "cell-index unspecified, assuming SPI1\n"); 303 303 /* fall through */ 304 304 case 0: 305 305 mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+5 -5
drivers/spi/spi-fsl-dspi.c
··· 108 108 struct spi_bitbang bitbang; 109 109 struct platform_device *pdev; 110 110 111 - void *base; 111 + void __iomem *base; 112 112 int irq; 113 113 struct clk *clk; 114 114 ··· 165 165 } 166 166 } 167 167 168 - pr_warn("Can not find valid buad rate,speed_hz is %d,clkrate is %ld\ 168 + pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\ 169 169 ,we use the max prescaler value.\n", speed_hz, clkrate); 170 170 *pbr = ARRAY_SIZE(pbr_tbl) - 1; 171 171 *br = ARRAY_SIZE(brs) - 1; ··· 450 450 451 451 dspi = spi_master_get_devdata(master); 452 452 dspi->pdev = pdev; 453 - dspi->bitbang.master = spi_master_get(master); 453 + dspi->bitbang.master = master; 454 454 dspi->bitbang.chipselect = dspi_chipselect; 455 455 dspi->bitbang.setup_transfer = dspi_setup_transfer; 456 456 dspi->bitbang.txrx_bufs = dspi_txrx_transfer; ··· 520 520 clk_disable_unprepare(dspi->clk); 521 521 out_master_put: 522 522 spi_master_put(master); 523 - platform_set_drvdata(pdev, NULL); 524 523 525 524 return ret; 526 525 } ··· 530 531 531 532 /* Disconnect from the SPI framework */ 532 533 spi_bitbang_stop(&dspi->bitbang); 534 + clk_disable_unprepare(dspi->clk); 533 535 spi_master_put(dspi->bitbang.master); 534 536 535 537 return 0; ··· 547 547 module_platform_driver(fsl_dspi_driver); 548 548 549 549 MODULE_DESCRIPTION("Freescale DSPI Controller Driver"); 550 - MODULE_LICENSE("GPL v2"); 550 + MODULE_LICENSE("GPL"); 551 551 MODULE_ALIAS("platform:" DRIVER_NAME);
+2 -2
drivers/spi/spi-fsl-espi.c
··· 289 289 if ((first->bits_per_word != t->bits_per_word) || 290 290 (first->speed_hz != t->speed_hz)) { 291 291 espi_trans->status = -EINVAL; 292 - dev_err(mspi->dev, "bits_per_word/speed_hz should be" 293 - " same for the same SPI transfer\n"); 292 + dev_err(mspi->dev, 293 + "bits_per_word/speed_hz should be same for the same SPI transfer\n"); 294 294 return; 295 295 } 296 296
+3 -3
drivers/spi/spi-gpio.c
··· 22 22 #include <linux/init.h> 23 23 #include <linux/platform_device.h> 24 24 #include <linux/gpio.h> 25 + #include <linux/of.h> 25 26 #include <linux/of_device.h> 26 27 #include <linux/of_gpio.h> 27 28 ··· 468 467 } 469 468 #endif 470 469 471 - spi_gpio->bitbang.master = spi_master_get(master); 470 + spi_gpio->bitbang.master = master; 472 471 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 473 472 474 473 if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { ··· 487 486 488 487 status = spi_bitbang_start(&spi_gpio->bitbang); 489 488 if (status < 0) { 490 - spi_master_put(spi_gpio->bitbang.master); 491 489 gpio_free: 492 490 if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) 493 491 gpio_free(SPI_MISO_GPIO); ··· 510 510 511 511 /* stop() unregisters child devices too */ 512 512 status = spi_bitbang_stop(&spi_gpio->bitbang); 513 - spi_master_put(spi_gpio->bitbang.master); 514 513 515 514 if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) 516 515 gpio_free(SPI_MISO_GPIO); 517 516 if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) 518 517 gpio_free(SPI_MOSI_GPIO); 519 518 gpio_free(SPI_SCK_GPIO); 519 + spi_master_put(spi_gpio->bitbang.master); 520 520 521 521 return status; 522 522 }
+34 -1
drivers/spi/spi-imx.c
··· 749 749 { 750 750 } 751 751 752 + static int 753 + spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg) 754 + { 755 + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 756 + int ret; 757 + 758 + ret = clk_enable(spi_imx->clk_per); 759 + if (ret) 760 + return ret; 761 + 762 + ret = clk_enable(spi_imx->clk_ipg); 763 + if (ret) { 764 + clk_disable(spi_imx->clk_per); 765 + return ret; 766 + } 767 + 768 + return 0; 769 + } 770 + 771 + static int 772 + spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg) 773 + { 774 + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 775 + 776 + clk_disable(spi_imx->clk_ipg); 777 + clk_disable(spi_imx->clk_per); 778 + return 0; 779 + } 780 + 752 781 static int spi_imx_probe(struct platform_device *pdev) 753 782 { 754 783 struct device_node *np = pdev->dev.of_node; ··· 815 786 master->num_chipselect = num_cs; 816 787 817 788 spi_imx = spi_master_get_devdata(master); 818 - spi_imx->bitbang.master = spi_master_get(master); 789 + spi_imx->bitbang.master = master; 819 790 820 791 for (i = 0; i < master->num_chipselect; i++) { 821 792 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); ··· 839 810 spi_imx->bitbang.txrx_bufs = spi_imx_transfer; 840 811 spi_imx->bitbang.master->setup = spi_imx_setup; 841 812 spi_imx->bitbang.master->cleanup = spi_imx_cleanup; 813 + spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message; 814 + spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message; 842 815 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 843 816 844 817 init_completion(&spi_imx->xfer_done); ··· 903 872 904 873 dev_info(&pdev->dev, "probed\n"); 905 874 875 + clk_disable(spi_imx->clk_ipg); 876 + clk_disable(spi_imx->clk_per); 906 877 return ret; 907 878 908 879 out_clk_put:
+1 -1
drivers/spi/spi-lm70llp.c
··· 222 222 /* 223 223 * SPI and bitbang hookup. 224 224 */ 225 - pp->bitbang.master = spi_master_get(master); 225 + pp->bitbang.master = master; 226 226 pp->bitbang.chipselect = lm70_chipselect; 227 227 pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx; 228 228 pp->bitbang.flags = SPI_3WIRE;
+1 -3
drivers/spi/spi-mpc512x-psc.c
··· 536 536 if (ret < 0) 537 537 goto free_clock; 538 538 539 - ret = spi_register_master(master); 539 + ret = devm_spi_register_master(dev, master); 540 540 if (ret < 0) 541 541 goto free_clock; 542 542 ··· 559 559 struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); 560 560 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); 561 561 562 - spi_unregister_master(master); 563 562 clk_disable_unprepare(mps->clk_mclk); 564 563 free_irq(mps->irq, mps); 565 564 if (mps->psc) 566 565 iounmap(mps->psc); 567 - spi_master_put(master); 568 566 569 567 return 0; 570 568 }
+2 -2
drivers/spi/spi-mpc52xx-psc.c
··· 383 383 384 384 mps->irq = irq; 385 385 if (pdata == NULL) { 386 - dev_warn(dev, "probe called without platform data, no " 387 - "cs_control function will be called\n"); 386 + dev_warn(dev, 387 + "probe called without platform data, no cs_control function will be called\n"); 388 388 mps->cs_control = NULL; 389 389 mps->sysclk = 0; 390 390 master->bus_num = bus_num;
+81 -112
drivers/spi/spi-mxs.c
··· 57 57 58 58 #define SG_MAXLEN 0xff00 59 59 60 + /* 61 + * Flags for txrx functions. More efficient that using an argument register for 62 + * each one. 63 + */ 64 + #define TXRX_WRITE (1<<0) /* This is a write */ 65 + #define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */ 66 + 60 67 struct mxs_spi { 61 68 struct mxs_ssp ssp; 62 69 struct completion c; 70 + unsigned int sck; /* Rate requested (vs actual) */ 63 71 }; 64 72 65 73 static int mxs_spi_setup_transfer(struct spi_device *dev, 66 - struct spi_transfer *t) 74 + const struct spi_transfer *t) 67 75 { 68 76 struct mxs_spi *spi = spi_master_get_devdata(dev->master); 69 77 struct mxs_ssp *ssp = &spi->ssp; 70 - uint32_t hz = 0; 78 + const unsigned int hz = min(dev->max_speed_hz, t->speed_hz); 71 79 72 - hz = dev->max_speed_hz; 73 - if (t && t->speed_hz) 74 - hz = min(hz, t->speed_hz); 75 80 if (hz == 0) { 76 - dev_err(&dev->dev, "Cannot continue with zero clock\n"); 81 + dev_err(&dev->dev, "SPI clock rate of zero not allowed\n"); 77 82 return -EINVAL; 78 83 } 79 84 80 - mxs_ssp_set_clk_rate(ssp, hz); 85 + if (hz != spi->sck) { 86 + mxs_ssp_set_clk_rate(ssp, hz); 87 + /* 88 + * Save requested rate, hz, rather than the actual rate, 89 + * ssp->clk_rate. Otherwise we would set the rate every trasfer 90 + * when the actual rate is not quite the same as requested rate. 91 + */ 92 + spi->sck = hz; 93 + /* 94 + * Perhaps we should return an error if the actual clock is 95 + * nowhere close to what was requested? 96 + */ 97 + } 98 + 99 + writel(BM_SSP_CTRL0_LOCK_CS, 100 + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 81 101 82 102 writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) | 83 - BF_SSP_CTRL1_WORD_LENGTH 84 - (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) | 85 - ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | 86 - ((dev->mode & SPI_CPHA) ? 
BM_SSP_CTRL1_PHASE : 0), 87 - ssp->base + HW_SSP_CTRL1(ssp)); 103 + BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) | 104 + ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | 105 + ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0), 106 + ssp->base + HW_SSP_CTRL1(ssp)); 88 107 89 108 writel(0x0, ssp->base + HW_SSP_CMD0); 90 109 writel(0x0, ssp->base + HW_SSP_CMD1); ··· 113 94 114 95 static int mxs_spi_setup(struct spi_device *dev) 115 96 { 116 - int err = 0; 117 - 118 97 if (!dev->bits_per_word) 119 98 dev->bits_per_word = 8; 120 99 121 - if (dev->mode & ~(SPI_CPOL | SPI_CPHA)) 122 - return -EINVAL; 123 - 124 - err = mxs_spi_setup_transfer(dev, NULL); 125 - if (err) { 126 - dev_err(&dev->dev, 127 - "Failed to setup transfer, error = %d\n", err); 128 - } 129 - 130 - return err; 100 + return 0; 131 101 } 132 102 133 - static uint32_t mxs_spi_cs_to_reg(unsigned cs) 103 + static u32 mxs_spi_cs_to_reg(unsigned cs) 134 104 { 135 - uint32_t select = 0; 105 + u32 select = 0; 136 106 137 107 /* 138 108 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0 ··· 139 131 return select; 140 132 } 141 133 142 - static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs) 143 - { 144 - const uint32_t mask = 145 - BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ; 146 - uint32_t select; 147 - struct mxs_ssp *ssp = &spi->ssp; 148 - 149 - writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 150 - select = mxs_spi_cs_to_reg(cs); 151 - writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 152 - } 153 - 154 - static inline void mxs_spi_enable(struct mxs_spi *spi) 155 - { 156 - struct mxs_ssp *ssp = &spi->ssp; 157 - 158 - writel(BM_SSP_CTRL0_LOCK_CS, 159 - ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 160 - writel(BM_SSP_CTRL0_IGNORE_CRC, 161 - ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 162 - } 163 - 164 - static inline void mxs_spi_disable(struct mxs_spi *spi) 165 - { 166 - struct mxs_ssp *ssp = &spi->ssp; 167 - 168 - writel(BM_SSP_CTRL0_LOCK_CS, 
169 - ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 170 - writel(BM_SSP_CTRL0_IGNORE_CRC, 171 - ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 172 - } 173 - 174 134 static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set) 175 135 { 176 136 const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT); 177 137 struct mxs_ssp *ssp = &spi->ssp; 178 - uint32_t reg; 138 + u32 reg; 179 139 180 140 do { 181 141 reg = readl_relaxed(ssp->base + offset); ··· 176 200 return IRQ_HANDLED; 177 201 } 178 202 179 - static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs, 203 + static int mxs_spi_txrx_dma(struct mxs_spi *spi, 180 204 unsigned char *buf, int len, 181 - int *first, int *last, int write) 205 + unsigned int flags) 182 206 { 183 207 struct mxs_ssp *ssp = &spi->ssp; 184 208 struct dma_async_tx_descriptor *desc = NULL; ··· 187 211 const int sgs = DIV_ROUND_UP(len, desc_len); 188 212 int sg_count; 189 213 int min, ret; 190 - uint32_t ctrl0; 214 + u32 ctrl0; 191 215 struct page *vm_page; 192 216 void *sg_buf; 193 217 struct { 194 - uint32_t pio[4]; 218 + u32 pio[4]; 195 219 struct scatterlist sg; 196 220 } *dma_xfer; 197 221 ··· 204 228 205 229 INIT_COMPLETION(spi->c); 206 230 231 + /* Chip select was already programmed into CTRL0 */ 207 232 ctrl0 = readl(ssp->base + HW_SSP_CTRL0); 208 - ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT; 209 - ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs); 233 + ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC | 234 + BM_SSP_CTRL0_READ); 235 + ctrl0 |= BM_SSP_CTRL0_DATA_XFER; 210 236 211 - if (*first) 212 - ctrl0 |= BM_SSP_CTRL0_LOCK_CS; 213 - if (!write) 237 + if (!(flags & TXRX_WRITE)) 214 238 ctrl0 |= BM_SSP_CTRL0_READ; 215 239 216 240 /* Queue the DMA data transfer. */ 217 241 for (sg_count = 0; sg_count < sgs; sg_count++) { 242 + /* Prepare the transfer descriptor. */ 218 243 min = min(len, desc_len); 219 244 220 - /* Prepare the transfer descriptor. 
*/ 221 - if ((sg_count + 1 == sgs) && *last) 245 + /* 246 + * De-assert CS on last segment if flag is set (i.e., no more 247 + * transfers will follow) 248 + */ 249 + if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS)) 222 250 ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC; 223 251 224 252 if (ssp->devid == IMX23_SSP) { ··· 247 267 248 268 sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min); 249 269 ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1, 250 - write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 270 + (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 251 271 252 272 len -= min; 253 273 buf += min; ··· 267 287 268 288 desc = dmaengine_prep_slave_sg(ssp->dmach, 269 289 &dma_xfer[sg_count].sg, 1, 270 - write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 290 + (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 271 291 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 272 292 273 293 if (!desc) { ··· 304 324 while (--sg_count >= 0) { 305 325 err_mapped: 306 326 dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1, 307 - write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 327 + (flags & TXRX_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 308 328 } 309 329 310 330 kfree(dma_xfer); ··· 312 332 return ret; 313 333 } 314 334 315 - static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs, 335 + static int mxs_spi_txrx_pio(struct mxs_spi *spi, 316 336 unsigned char *buf, int len, 317 - int *first, int *last, int write) 337 + unsigned int flags) 318 338 { 319 339 struct mxs_ssp *ssp = &spi->ssp; 320 340 321 - if (*first) 322 - mxs_spi_enable(spi); 323 - 324 - mxs_spi_set_cs(spi, cs); 341 + writel(BM_SSP_CTRL0_IGNORE_CRC, 342 + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 325 343 326 344 while (len--) { 327 - if (*last && len == 0) 328 - mxs_spi_disable(spi); 345 + if (len == 0 && (flags & TXRX_DEASSERT_CS)) 346 + writel(BM_SSP_CTRL0_IGNORE_CRC, 347 + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 329 348 330 349 if (ssp->devid == IMX23_SSP) { 331 350 writel(BM_SSP_CTRL0_XFER_COUNT, ··· 335 356 writel(1, ssp->base + HW_SSP_XFER_SIZE); 336 357 } 337 358 338 - if (write) 359 + if (flags & TXRX_WRITE) 339 360 writel(BM_SSP_CTRL0_READ, 340 361 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 341 362 else ··· 348 369 if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1)) 349 370 return -ETIMEDOUT; 350 371 351 - if (write) 372 + if (flags & TXRX_WRITE) 352 373 writel(*buf, ssp->base + HW_SSP_DATA(ssp)); 353 374 354 375 writel(BM_SSP_CTRL0_DATA_XFER, 355 376 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 356 377 357 - if (!write) { 378 + if (!(flags & TXRX_WRITE)) { 358 379 if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp), 359 380 BM_SSP_STATUS_FIFO_EMPTY, 0)) 360 381 return -ETIMEDOUT; ··· 379 400 { 380 401 struct mxs_spi *spi = spi_master_get_devdata(master); 381 402 struct mxs_ssp *ssp = &spi->ssp; 382 - int first, last; 383 403 struct spi_transfer *t, *tmp_t; 404 + unsigned int flag; 384 405 int status = 0; 385 - int cs; 386 406 387 - first = last = 0; 388 - 389 - cs = m->spi->chip_select; 407 + /* Program CS register bits here, it will be used for all transfers. 
*/ 408 + writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ, 409 + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 410 + writel(mxs_spi_cs_to_reg(m->spi->chip_select), 411 + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 390 412 391 413 list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) { 392 414 ··· 395 415 if (status) 396 416 break; 397 417 398 - if (&t->transfer_list == m->transfers.next) 399 - first = 1; 400 - if (&t->transfer_list == m->transfers.prev) 401 - last = 1; 402 - if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) { 403 - dev_err(ssp->dev, 404 - "Cannot send and receive simultaneously\n"); 405 - status = -EINVAL; 406 - break; 407 - } 418 + /* De-assert on last transfer, inverted by cs_change flag */ 419 + flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ? 420 + TXRX_DEASSERT_CS : 0; 408 421 409 422 /* 410 423 * Small blocks can be transfered via PIO. ··· 414 441 STMP_OFFSET_REG_CLR); 415 442 416 443 if (t->tx_buf) 417 - status = mxs_spi_txrx_pio(spi, cs, 444 + status = mxs_spi_txrx_pio(spi, 418 445 (void *)t->tx_buf, 419 - t->len, &first, &last, 1); 446 + t->len, flag | TXRX_WRITE); 420 447 if (t->rx_buf) 421 - status = mxs_spi_txrx_pio(spi, cs, 448 + status = mxs_spi_txrx_pio(spi, 422 449 t->rx_buf, t->len, 423 - &first, &last, 0); 450 + flag); 424 451 } else { 425 452 writel(BM_SSP_CTRL1_DMA_ENABLE, 426 453 ssp->base + HW_SSP_CTRL1(ssp) + 427 454 STMP_OFFSET_REG_SET); 428 455 429 456 if (t->tx_buf) 430 - status = mxs_spi_txrx_dma(spi, cs, 457 + status = mxs_spi_txrx_dma(spi, 431 458 (void *)t->tx_buf, t->len, 432 - &first, &last, 1); 459 + flag | TXRX_WRITE); 433 460 if (t->rx_buf) 434 - status = mxs_spi_txrx_dma(spi, cs, 461 + status = mxs_spi_txrx_dma(spi, 435 462 t->rx_buf, t->len, 436 - &first, &last, 0); 463 + flag); 437 464 } 438 465 439 466 if (status) { ··· 442 469 } 443 470 444 471 m->actual_length += t->len; 445 - first = last = 0; 446 472 } 447 473 448 474 m->status = status; ··· 535 563 
goto out_dma_release; 536 564 537 565 clk_set_rate(ssp->clk, clk_freq); 538 - ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; 539 566 540 567 ret = stmp_reset_block(ssp->base); 541 568 if (ret) ··· 542 571 543 572 platform_set_drvdata(pdev, master); 544 573 545 - ret = spi_register_master(master); 574 + ret = devm_spi_register_master(&pdev->dev, master); 546 575 if (ret) { 547 576 dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); 548 577 goto out_disable_clk; ··· 569 598 spi = spi_master_get_devdata(master); 570 599 ssp = &spi->ssp; 571 600 572 - spi_unregister_master(master); 573 601 clk_disable_unprepare(ssp->clk); 574 602 dma_release_channel(ssp->dmach); 575 - spi_master_put(master); 576 603 577 604 return 0; 578 605 }
+1 -2
drivers/spi/spi-nuc900.c
··· 349 349 } 350 350 351 351 hw = spi_master_get_devdata(master); 352 - hw->master = spi_master_get(master); 352 + hw->master = master; 353 353 hw->pdata = dev_get_platdata(&pdev->dev); 354 354 hw->dev = &pdev->dev; 355 355 ··· 435 435 kfree(hw->ioarea); 436 436 err_pdata: 437 437 spi_master_put(hw->master); 438 - 439 438 err_nomem: 440 439 return err; 441 440 }
+1 -1
drivers/spi/spi-oc-tiny.c
··· 306 306 platform_set_drvdata(pdev, hw); 307 307 308 308 /* setup the state for the bitbang driver */ 309 - hw->bitbang.master = spi_master_get(master); 309 + hw->bitbang.master = master; 310 310 if (!hw->bitbang.master) 311 311 return err; 312 312 hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
+1 -3
drivers/spi/spi-octeon.c
··· 272 272 master->bits_per_word_mask = SPI_BPW_MASK(8); 273 273 274 274 master->dev.of_node = pdev->dev.of_node; 275 - err = spi_register_master(master); 275 + err = devm_spi_register_master(&pdev->dev, master); 276 276 if (err) { 277 277 dev_err(&pdev->dev, "register master failed: %d\n", err); 278 278 goto fail; ··· 291 291 struct spi_master *master = platform_get_drvdata(pdev); 292 292 struct octeon_spi *p = spi_master_get_devdata(master); 293 293 u64 register_base = p->register_base; 294 - 295 - spi_unregister_master(master); 296 294 297 295 /* Clear the CSENA* and put everything in a known state. */ 298 296 cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
+1 -3
drivers/spi/spi-omap-100k.c
··· 457 457 goto err; 458 458 } 459 459 460 - status = spi_register_master(master); 460 + status = devm_spi_register_master(&pdev->dev, master); 461 461 if (status < 0) 462 462 goto err; 463 463 ··· 484 484 return status; 485 485 486 486 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 487 - 488 - spi_unregister_master(master); 489 487 490 488 return 0; 491 489 }
+3 -2
drivers/spi/spi-omap-uwire.c
··· 557 557 .name = "omap_uwire", 558 558 .owner = THIS_MODULE, 559 559 }, 560 - .remove = uwire_remove, 560 + .probe = uwire_probe, 561 + .remove = uwire_remove, 561 562 // suspend ... unuse ck 562 563 // resume ... use ck 563 564 }; ··· 580 579 omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); 581 580 } 582 581 583 - return platform_driver_probe(&uwire_driver, uwire_probe); 582 + return platform_driver_register(&uwire_driver); 584 583 } 585 584 586 585 static void __exit omap_uwire_exit(void)
+12 -7
drivers/spi/spi-omap2-mcspi.c
··· 276 276 struct omap2_mcspi_cs *cs = spi->controller_state; 277 277 struct omap2_mcspi *mcspi; 278 278 unsigned int wcnt; 279 - int fifo_depth, bytes_per_word; 279 + int max_fifo_depth, fifo_depth, bytes_per_word; 280 280 u32 chconf, xferlevel; 281 281 282 282 mcspi = spi_master_get_devdata(master); ··· 287 287 if (t->len % bytes_per_word != 0) 288 288 goto disable_fifo; 289 289 290 - fifo_depth = gcd(t->len, OMAP2_MCSPI_MAX_FIFODEPTH); 290 + if (t->rx_buf != NULL && t->tx_buf != NULL) 291 + max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2; 292 + else 293 + max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; 294 + 295 + fifo_depth = gcd(t->len, max_fifo_depth); 291 296 if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0) 292 297 goto disable_fifo; 293 298 ··· 304 299 if (t->rx_buf != NULL) { 305 300 chconf |= OMAP2_MCSPI_CHCONF_FFER; 306 301 xferlevel |= (fifo_depth - 1) << 8; 307 - } else { 302 + } 303 + if (t->tx_buf != NULL) { 308 304 chconf |= OMAP2_MCSPI_CHCONF_FFET; 309 305 xferlevel |= fifo_depth - 1; 310 306 } ··· 504 498 ((u32 *)xfer->rx_buf)[elements++] = w; 505 499 } else { 506 500 int bytes_per_word = mcspi_bytes_per_word(word_len); 507 - dev_err(&spi->dev, "DMA RX penultimate word empty"); 501 + dev_err(&spi->dev, "DMA RX penultimate word empty\n"); 508 502 count -= (bytes_per_word << 1); 509 503 omap2_mcspi_set_enable(spi, 1); 510 504 return count; ··· 522 516 else /* word_len <= 32 */ 523 517 ((u32 *)xfer->rx_buf)[elements] = w; 524 518 } else { 525 - dev_err(&spi->dev, "DMA RX last word empty"); 519 + dev_err(&spi->dev, "DMA RX last word empty\n"); 526 520 count -= mcspi_bytes_per_word(word_len); 527 521 } 528 522 omap2_mcspi_set_enable(spi, 1); ··· 1413 1407 if (status < 0) 1414 1408 goto disable_pm; 1415 1409 1416 - status = spi_register_master(master); 1410 + status = devm_spi_register_master(&pdev->dev, master); 1417 1411 if (status < 0) 1418 1412 goto disable_pm; 1419 1413 ··· 1441 1435 pm_runtime_put_sync(mcspi->dev); 1442 1436 
pm_runtime_disable(&pdev->dev); 1443 1437 1444 - spi_unregister_master(master); 1445 1438 kfree(dma_channels); 1446 1439 1447 1440 return 0;
+4 -6
drivers/spi/spi-orion.c
··· 84 84 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG, 85 85 ORION_SPI_IF_8_16_BIT_MODE); 86 86 } else { 87 - pr_debug("Bad bits per word value %d (only 8 or 16 are " 88 - "allowed).\n", size); 87 + pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n", 88 + size); 89 89 return -EINVAL; 90 90 } 91 91 ··· 407 407 const u32 *iprop; 408 408 int size; 409 409 410 - master = spi_alloc_master(&pdev->dev, sizeof *spi); 410 + master = spi_alloc_master(&pdev->dev, sizeof(*spi)); 411 411 if (master == NULL) { 412 412 dev_dbg(&pdev->dev, "master allocation failed\n"); 413 413 return -ENOMEM; ··· 457 457 goto out_rel_clk; 458 458 459 459 master->dev.of_node = pdev->dev.of_node; 460 - status = spi_register_master(master); 460 + status = devm_spi_register_master(&pdev->dev, master); 461 461 if (status < 0) 462 462 goto out_rel_clk; 463 463 ··· 482 482 483 483 clk_disable_unprepare(spi->clk); 484 484 clk_put(spi->clk); 485 - 486 - spi_unregister_master(master); 487 485 488 486 return 0; 489 487 }
+3 -7
drivers/spi/spi-pl022.c
··· 1619 1619 dev_err(&pl022->adev->dev, 1620 1620 "RX FIFO Trigger Level is configured incorrectly\n"); 1621 1621 return -EINVAL; 1622 - break; 1623 1622 } 1624 1623 switch (chip_info->tx_lev_trig) { 1625 1624 case SSP_TX_1_OR_MORE_EMPTY_LOC: ··· 1644 1645 dev_err(&pl022->adev->dev, 1645 1646 "TX FIFO Trigger Level is configured incorrectly\n"); 1646 1647 return -EINVAL; 1647 - break; 1648 1648 } 1649 1649 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { 1650 1650 if ((chip_info->ctrl_len < SSP_BITS_4) ··· 2173 2175 status = -ENOMEM; 2174 2176 goto err_no_ioremap; 2175 2177 } 2176 - printk(KERN_INFO "pl022: mapped registers from %pa to %p\n", 2177 - &adev->res.start, pl022->virtbase); 2178 + dev_info(&adev->dev, "mapped registers from %pa to %p\n", 2179 + &adev->res.start, pl022->virtbase); 2178 2180 2179 2181 pl022->clk = devm_clk_get(&adev->dev, NULL); 2180 2182 if (IS_ERR(pl022->clk)) { ··· 2225 2227 2226 2228 /* Register with the SPI framework */ 2227 2229 amba_set_drvdata(adev, pl022); 2228 - status = spi_register_master(master); 2230 + status = devm_spi_register_master(&adev->dev, master); 2229 2231 if (status != 0) { 2230 2232 dev_err(&adev->dev, 2231 2233 "probe - problem registering spi master\n"); ··· 2285 2287 clk_unprepare(pl022->clk); 2286 2288 amba_release_regions(adev); 2287 2289 tasklet_disable(&pl022->pump_transfers); 2288 - spi_unregister_master(pl022->master); 2289 - amba_set_drvdata(adev, NULL); 2290 2290 return 0; 2291 2291 } 2292 2292
+2 -1
drivers/spi/spi-ppc4xx.c
··· 396 396 master->dev.of_node = np; 397 397 platform_set_drvdata(op, master); 398 398 hw = spi_master_get_devdata(master); 399 - hw->master = spi_master_get(master); 399 + hw->master = master; 400 400 hw->dev = dev; 401 401 402 402 init_completion(&hw->done); ··· 558 558 free_irq(hw->irqnum, hw); 559 559 iounmap(hw->regs); 560 560 free_gpios(hw); 561 + spi_master_put(master); 561 562 return 0; 562 563 } 563 564
+17 -24
drivers/spi/spi-pxa2xx.c
··· 573 573 write_SSTO(0, reg); 574 574 write_SSSR_CS(drv_data, drv_data->clear_sr); 575 575 576 - dev_err(&drv_data->pdev->dev, "bad message state " 577 - "in interrupt handler\n"); 576 + dev_err(&drv_data->pdev->dev, 577 + "bad message state in interrupt handler\n"); 578 578 579 579 /* Never fail */ 580 580 return IRQ_HANDLED; ··· 651 651 if (message->is_dma_mapped 652 652 || transfer->rx_dma || transfer->tx_dma) { 653 653 dev_err(&drv_data->pdev->dev, 654 - "pump_transfers: mapped transfer length " 655 - "of %u is greater than %d\n", 654 + "pump_transfers: mapped transfer length of " 655 + "%u is greater than %d\n", 656 656 transfer->len, MAX_DMA_LEN); 657 657 message->status = -EINVAL; 658 658 giveback(drv_data); ··· 660 660 } 661 661 662 662 /* warn ... we force this to PIO mode */ 663 - if (printk_ratelimit()) 664 - dev_warn(&message->spi->dev, "pump_transfers: " 665 - "DMA disabled for transfer length %ld " 666 - "greater than %d\n", 667 - (long)drv_data->len, MAX_DMA_LEN); 663 + dev_warn_ratelimited(&message->spi->dev, 664 + "pump_transfers: DMA disabled for transfer length %ld " 665 + "greater than %d\n", 666 + (long)drv_data->len, MAX_DMA_LEN); 668 667 } 669 668 670 669 /* Setup the transfer state based on the type of transfer */ ··· 725 726 message->spi, 726 727 bits, &dma_burst, 727 728 &dma_thresh)) 728 - if (printk_ratelimit()) 729 - dev_warn(&message->spi->dev, 730 - "pump_transfers: " 731 - "DMA burst size reduced to " 732 - "match bits_per_word\n"); 729 + dev_warn_ratelimited(&message->spi->dev, 730 + "pump_transfers: DMA burst size reduced to match bits_per_word\n"); 733 731 } 734 732 735 733 cr0 = clk_div ··· 850 854 if (gpio_is_valid(chip_info->gpio_cs)) { 851 855 err = gpio_request(chip_info->gpio_cs, "SPI_CS"); 852 856 if (err) { 853 - dev_err(&spi->dev, "failed to request chip select " 854 - "GPIO%d\n", chip_info->gpio_cs); 857 + dev_err(&spi->dev, "failed to request chip select GPIO%d\n", 858 + chip_info->gpio_cs); 855 859 return err; 856 
860 } 857 861 ··· 895 899 896 900 if (drv_data->ssp_type == CE4100_SSP) { 897 901 if (spi->chip_select > 4) { 898 - dev_err(&spi->dev, "failed setup: " 899 - "cs number must not be > 4.\n"); 902 + dev_err(&spi->dev, 903 + "failed setup: cs number must not be > 4.\n"); 900 904 kfree(chip); 901 905 return -EINVAL; 902 906 } ··· 952 956 spi->bits_per_word, 953 957 &chip->dma_burst_size, 954 958 &chip->dma_threshold)) { 955 - dev_warn(&spi->dev, "in setup: DMA burst size reduced " 956 - "to match bits_per_word\n"); 959 + dev_warn(&spi->dev, 960 + "in setup: DMA burst size reduced to match bits_per_word\n"); 957 961 } 958 962 } 959 963 ··· 1201 1205 1202 1206 /* Register with the SPI framework */ 1203 1207 platform_set_drvdata(pdev, drv_data); 1204 - status = spi_register_master(master); 1208 + status = devm_spi_register_master(&pdev->dev, master); 1205 1209 if (status != 0) { 1206 1210 dev_err(&pdev->dev, "problem registering spi master\n"); 1207 1211 goto out_error_clock_enabled; ··· 1252 1256 1253 1257 /* Release SSP */ 1254 1258 pxa_ssp_free(ssp); 1255 - 1256 - /* Disconnect from the SPI framework */ 1257 - spi_unregister_master(drv_data->master); 1258 1259 1259 1260 return 0; 1260 1261 }
+234 -42
drivers/spi/spi-rspi.c
··· 59 59 #define RSPI_SPCMD6 0x1c 60 60 #define RSPI_SPCMD7 0x1e 61 61 62 + /*qspi only */ 63 + #define QSPI_SPBFCR 0x18 64 + #define QSPI_SPBDCR 0x1a 65 + #define QSPI_SPBMUL0 0x1c 66 + #define QSPI_SPBMUL1 0x20 67 + #define QSPI_SPBMUL2 0x24 68 + #define QSPI_SPBMUL3 0x28 69 + 62 70 /* SPCR */ 63 71 #define SPCR_SPRIE 0x80 64 72 #define SPCR_SPE 0x40 ··· 134 126 #define SPCMD_LSBF 0x1000 135 127 #define SPCMD_SPB_MASK 0x0f00 136 128 #define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK) 129 + #define SPCMD_SPB_8BIT 0x0000 /* qspi only */ 130 + #define SPCMD_SPB_16BIT 0x0100 137 131 #define SPCMD_SPB_20BIT 0x0000 138 132 #define SPCMD_SPB_24BIT 0x0100 139 133 #define SPCMD_SPB_32BIT 0x0200 ··· 144 134 #define SPCMD_BRDV_MASK 0x000c 145 135 #define SPCMD_CPOL 0x0002 146 136 #define SPCMD_CPHA 0x0001 137 + 138 + /* SPBFCR */ 139 + #define SPBFCR_TXRST 0x80 /* qspi only */ 140 + #define SPBFCR_RXRST 0x40 /* qspi only */ 147 141 148 142 struct rspi_data { 149 143 void __iomem *addr; ··· 159 145 spinlock_t lock; 160 146 struct clk *clk; 161 147 unsigned char spsr; 148 + const struct spi_ops *ops; 162 149 163 150 /* for dmaengine */ 164 151 struct dma_chan *chan_tx; ··· 180 165 iowrite16(data, rspi->addr + offset); 181 166 } 182 167 168 + static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset) 169 + { 170 + iowrite32(data, rspi->addr + offset); 171 + } 172 + 183 173 static u8 rspi_read8(struct rspi_data *rspi, u16 offset) 184 174 { 185 175 return ioread8(rspi->addr + offset); ··· 195 175 return ioread16(rspi->addr + offset); 196 176 } 197 177 198 - static unsigned char rspi_calc_spbr(struct rspi_data *rspi) 178 + /* optional functions */ 179 + struct spi_ops { 180 + int (*set_config_register)(struct rspi_data *rspi, int access_size); 181 + int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg, 182 + struct spi_transfer *t); 183 + int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg, 184 + struct spi_transfer *t); 185 
+ 186 + }; 187 + 188 + /* 189 + * functions for RSPI 190 + */ 191 + static int rspi_set_config_register(struct rspi_data *rspi, int access_size) 199 192 { 200 - int tmp; 201 - unsigned char spbr; 193 + int spbr; 202 194 203 - tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 204 - spbr = clamp(tmp, 0, 255); 195 + /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */ 196 + rspi_write8(rspi, 0x00, RSPI_SPPCR); 205 197 206 - return spbr; 198 + /* Sets transfer bit rate */ 199 + spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 200 + rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 201 + 202 + /* Sets number of frames to be used: 1 frame */ 203 + rspi_write8(rspi, 0x00, RSPI_SPDCR); 204 + 205 + /* Sets RSPCK, SSL, next-access delay value */ 206 + rspi_write8(rspi, 0x00, RSPI_SPCKD); 207 + rspi_write8(rspi, 0x00, RSPI_SSLND); 208 + rspi_write8(rspi, 0x00, RSPI_SPND); 209 + 210 + /* Sets parity, interrupt mask */ 211 + rspi_write8(rspi, 0x00, RSPI_SPCR2); 212 + 213 + /* Sets SPCMD */ 214 + rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP, 215 + RSPI_SPCMD0); 216 + 217 + /* Sets RSPI mode */ 218 + rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR); 219 + 220 + return 0; 207 221 } 222 + 223 + /* 224 + * functions for QSPI 225 + */ 226 + static int qspi_set_config_register(struct rspi_data *rspi, int access_size) 227 + { 228 + u16 spcmd; 229 + int spbr; 230 + 231 + /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */ 232 + rspi_write8(rspi, 0x00, RSPI_SPPCR); 233 + 234 + /* Sets transfer bit rate */ 235 + spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz); 236 + rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 237 + 238 + /* Sets number of frames to be used: 1 frame */ 239 + rspi_write8(rspi, 0x00, RSPI_SPDCR); 240 + 241 + /* Sets RSPCK, SSL, next-access delay value */ 242 + rspi_write8(rspi, 0x00, RSPI_SPCKD); 243 + rspi_write8(rspi, 0x00, RSPI_SSLND); 244 + rspi_write8(rspi, 0x00, RSPI_SPND); 245 + 246 
+ /* Data Length Setting */ 247 + if (access_size == 8) 248 + spcmd = SPCMD_SPB_8BIT; 249 + else if (access_size == 16) 250 + spcmd = SPCMD_SPB_16BIT; 251 + else if (access_size == 32) 252 + spcmd = SPCMD_SPB_32BIT; 253 + 254 + spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN; 255 + 256 + /* Resets transfer data length */ 257 + rspi_write32(rspi, 0, QSPI_SPBMUL0); 258 + 259 + /* Resets transmit and receive buffer */ 260 + rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR); 261 + /* Sets buffer to allow normal operation */ 262 + rspi_write8(rspi, 0x00, QSPI_SPBFCR); 263 + 264 + /* Sets SPCMD */ 265 + rspi_write16(rspi, spcmd, RSPI_SPCMD0); 266 + 267 + /* Enables SPI function in a master mode */ 268 + rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR); 269 + 270 + return 0; 271 + } 272 + 273 + #define set_config_register(spi, n) spi->ops->set_config_register(spi, n) 208 274 209 275 static void rspi_enable_irq(struct rspi_data *rspi, u8 enable) 210 276 { ··· 326 220 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR); 327 221 } 328 222 329 - static int rspi_set_config_register(struct rspi_data *rspi, int access_size) 330 - { 331 - /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */ 332 - rspi_write8(rspi, 0x00, RSPI_SPPCR); 333 - 334 - /* Sets transfer bit rate */ 335 - rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR); 336 - 337 - /* Sets number of frames to be used: 1 frame */ 338 - rspi_write8(rspi, 0x00, RSPI_SPDCR); 339 - 340 - /* Sets RSPCK, SSL, next-access delay value */ 341 - rspi_write8(rspi, 0x00, RSPI_SPCKD); 342 - rspi_write8(rspi, 0x00, RSPI_SSLND); 343 - rspi_write8(rspi, 0x00, RSPI_SPND); 344 - 345 - /* Sets parity, interrupt mask */ 346 - rspi_write8(rspi, 0x00, RSPI_SPCR2); 347 - 348 - /* Sets SPCMD */ 349 - rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP, 350 - RSPI_SPCMD0); 351 - 352 - /* Sets RSPI mode */ 353 - rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR); 354 - 355 - return 
0; 356 - } 357 - 358 223 static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg, 359 224 struct spi_transfer *t) 360 225 { ··· 353 276 354 277 return 0; 355 278 } 279 + 280 + static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg, 281 + struct spi_transfer *t) 282 + { 283 + int remain = t->len; 284 + u8 *data; 285 + 286 + rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR); 287 + rspi_write8(rspi, 0x00, QSPI_SPBFCR); 288 + 289 + data = (u8 *)t->tx_buf; 290 + while (remain > 0) { 291 + 292 + if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) { 293 + dev_err(&rspi->master->dev, 294 + "%s: tx empty timeout\n", __func__); 295 + return -ETIMEDOUT; 296 + } 297 + rspi_write8(rspi, *data++, RSPI_SPDR); 298 + 299 + if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) { 300 + dev_err(&rspi->master->dev, 301 + "%s: receive timeout\n", __func__); 302 + return -ETIMEDOUT; 303 + } 304 + rspi_read8(rspi, RSPI_SPDR); 305 + 306 + remain--; 307 + } 308 + 309 + /* Waiting for the last transmition */ 310 + rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); 311 + 312 + return 0; 313 + } 314 + 315 + #define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t) 356 316 357 317 static void rspi_dma_complete(void *arg) 358 318 { ··· 556 442 return 0; 557 443 } 558 444 445 + static void qspi_receive_init(struct rspi_data *rspi) 446 + { 447 + unsigned char spsr; 448 + 449 + spsr = rspi_read8(rspi, RSPI_SPSR); 450 + if (spsr & SPSR_SPRF) 451 + rspi_read8(rspi, RSPI_SPDR); /* dummy read */ 452 + rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR); 453 + rspi_write8(rspi, 0x00, QSPI_SPBFCR); 454 + } 455 + 456 + static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg, 457 + struct spi_transfer *t) 458 + { 459 + int remain = t->len; 460 + u8 *data; 461 + 462 + qspi_receive_init(rspi); 463 + 464 + data = (u8 *)t->rx_buf; 465 + while (remain > 0) { 466 + 467 + if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, 
SPCR_SPTIE) < 0) { 468 + dev_err(&rspi->master->dev, 469 + "%s: tx empty timeout\n", __func__); 470 + return -ETIMEDOUT; 471 + } 472 + /* dummy write for generate clock */ 473 + rspi_write8(rspi, 0x00, RSPI_SPDR); 474 + 475 + if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) { 476 + dev_err(&rspi->master->dev, 477 + "%s: receive timeout\n", __func__); 478 + return -ETIMEDOUT; 479 + } 480 + /* SPDR allows 8, 16 or 32-bit access */ 481 + *data++ = rspi_read8(rspi, RSPI_SPDR); 482 + remain--; 483 + } 484 + 485 + return 0; 486 + } 487 + 488 + #define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t) 489 + 559 490 static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t) 560 491 { 561 492 struct scatterlist sg, sg_dummy; ··· 740 581 if (rspi_is_dma(rspi, t)) 741 582 ret = rspi_send_dma(rspi, t); 742 583 else 743 - ret = rspi_send_pio(rspi, mesg, t); 584 + ret = send_pio(rspi, mesg, t); 744 585 if (ret < 0) 745 586 goto error; 746 587 } ··· 748 589 if (rspi_is_dma(rspi, t)) 749 590 ret = rspi_receive_dma(rspi, t); 750 591 else 751 - ret = rspi_receive_pio(rspi, mesg, t); 592 + ret = receive_pio(rspi, mesg, t); 752 593 if (ret < 0) 753 594 goto error; 754 595 } ··· 775 616 spi->bits_per_word = 8; 776 617 rspi->max_speed_hz = spi->max_speed_hz; 777 618 778 - rspi_set_config_register(rspi, 8); 619 + set_config_register(rspi, 8); 779 620 780 621 return 0; 781 622 } ··· 904 745 struct rspi_data *rspi; 905 746 int ret, irq; 906 747 char clk_name[16]; 748 + struct rspi_plat_data *rspi_pd = pdev->dev.platform_data; 749 + const struct spi_ops *ops; 750 + const struct platform_device_id *id_entry = pdev->id_entry; 907 751 752 + ops = (struct spi_ops *)id_entry->driver_data; 753 + /* ops parameter check */ 754 + if (!ops->set_config_register) { 755 + dev_err(&pdev->dev, "there is no set_config_register\n"); 756 + return -ENODEV; 757 + } 908 758 /* get base addr */ 909 759 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 910 760 if 
(unlikely(res == NULL)) { ··· 935 767 936 768 rspi = spi_master_get_devdata(master); 937 769 platform_set_drvdata(pdev, rspi); 938 - 770 + rspi->ops = ops; 939 771 rspi->master = master; 940 772 rspi->addr = ioremap(res->start, resource_size(res)); 941 773 if (rspi->addr == NULL) { ··· 944 776 goto error1; 945 777 } 946 778 947 - snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id); 779 + snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id); 948 780 rspi->clk = clk_get(&pdev->dev, clk_name); 949 781 if (IS_ERR(rspi->clk)) { 950 782 dev_err(&pdev->dev, "cannot get clock\n"); ··· 958 790 INIT_WORK(&rspi->ws, rspi_work); 959 791 init_waitqueue_head(&rspi->wait); 960 792 961 - master->num_chipselect = 2; 793 + master->num_chipselect = rspi_pd->num_chipselect; 794 + if (!master->num_chipselect) 795 + master->num_chipselect = 2; /* default */ 796 + 962 797 master->bus_num = pdev->id; 963 798 master->setup = rspi_setup; 964 799 master->transfer = rspi_transfer; ··· 1003 832 return ret; 1004 833 } 1005 834 835 + static struct spi_ops rspi_ops = { 836 + .set_config_register = rspi_set_config_register, 837 + .send_pio = rspi_send_pio, 838 + .receive_pio = rspi_receive_pio, 839 + }; 840 + 841 + static struct spi_ops qspi_ops = { 842 + .set_config_register = qspi_set_config_register, 843 + .send_pio = qspi_send_pio, 844 + .receive_pio = qspi_receive_pio, 845 + }; 846 + 847 + static struct platform_device_id spi_driver_ids[] = { 848 + { "rspi", (kernel_ulong_t)&rspi_ops }, 849 + { "qspi", (kernel_ulong_t)&qspi_ops }, 850 + {}, 851 + }; 852 + 853 + MODULE_DEVICE_TABLE(platform, spi_driver_ids); 854 + 1006 855 static struct platform_driver rspi_driver = { 1007 856 .probe = rspi_probe, 1008 857 .remove = rspi_remove, 858 + .id_table = spi_driver_ids, 1009 859 .driver = { 1010 - .name = "rspi", 860 + .name = "renesas_spi", 1011 861 .owner = THIS_MODULE, 1012 862 }, 1013 863 };
+2 -2
drivers/spi/spi-s3c24xx.c
··· 280 280 * so the caller does not need to do anything more than start the transfer 281 281 * as normal, since the IRQ will have been re-routed to the FIQ handler. 282 282 */ 283 - void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) 283 + static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) 284 284 { 285 285 struct pt_regs regs; 286 286 enum spi_fiq_mode mode; ··· 524 524 hw = spi_master_get_devdata(master); 525 525 memset(hw, 0, sizeof(struct s3c24xx_spi)); 526 526 527 - hw->master = spi_master_get(master); 527 + hw->master = master; 528 528 hw->pdata = pdata = dev_get_platdata(&pdev->dev); 529 529 hw->dev = &pdev->dev; 530 530
+135 -143
drivers/spi/spi-s3c64xx.c
··· 205 205 #endif 206 206 struct s3c64xx_spi_port_config *port_conf; 207 207 unsigned int port_id; 208 - unsigned long gpios[4]; 209 208 bool cs_gpio; 210 209 }; 211 210 ··· 558 559 static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, 559 560 struct spi_device *spi) 560 561 { 561 - struct s3c64xx_spi_csinfo *cs; 562 - 563 562 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */ 564 563 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ 565 564 /* Deselect the last toggled device */ 566 - cs = sdd->tgl_spi->controller_data; 567 - if (sdd->cs_gpio) 568 - gpio_set_value(cs->line, 565 + if (spi->cs_gpio >= 0) 566 + gpio_set_value(spi->cs_gpio, 569 567 spi->mode & SPI_CS_HIGH ? 0 : 1); 570 568 } 571 569 sdd->tgl_spi = NULL; 572 570 } 573 571 574 - cs = spi->controller_data; 575 - if (sdd->cs_gpio) 576 - gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); 577 - 578 - /* Start the signals */ 579 - writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 572 + if (spi->cs_gpio >= 0) 573 + gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0); 580 574 } 581 575 582 576 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, ··· 694 702 static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, 695 703 struct spi_device *spi) 696 704 { 697 - struct s3c64xx_spi_csinfo *cs = spi->controller_data; 698 - 699 705 if (sdd->tgl_spi == spi) 700 706 sdd->tgl_spi = NULL; 701 707 702 - if (sdd->cs_gpio) 703 - gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); 704 - 705 - /* Quiese the signals */ 706 - writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 708 + if (spi->cs_gpio >= 0) 709 + gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); 707 710 } 708 711 709 712 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) ··· 849 862 } 850 863 } 851 864 852 - static int s3c64xx_spi_transfer_one_message(struct spi_master *master, 853 - struct spi_message *msg) 865 + static int s3c64xx_spi_prepare_message(struct spi_master *master, 866 + struct spi_message *msg) 854 867 { 855 868 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 856 869 struct spi_device *spi = msg->spi; 857 870 struct s3c64xx_spi_csinfo *cs = spi->controller_data; 858 - struct spi_transfer *xfer; 859 - int status = 0, cs_toggle = 0; 860 - u32 speed; 861 - u8 bpw; 862 871 863 872 /* If Master's(controller) state differs from that needed by Slave */ 864 873 if (sdd->cur_speed != spi->max_speed_hz ··· 870 887 if (s3c64xx_spi_map_mssg(sdd, msg)) { 871 888 dev_err(&spi->dev, 872 889 "Xfer: Unable to map message buffers!\n"); 873 - status = -ENOMEM; 874 - goto out; 890 + return -ENOMEM; 875 891 } 876 892 877 893 /* Configure feedback delay */ 878 894 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); 879 895 880 - list_for_each_entry(xfer, &msg->transfers, transfer_list) { 896 + return 0; 897 + } 881 898 882 - unsigned long flags; 883 - int use_dma; 899 + static int s3c64xx_spi_transfer_one(struct spi_master *master, 900 + struct spi_device *spi, 901 + struct spi_transfer *xfer) 902 + { 903 + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 904 + int status; 905 + u32 speed; 906 + u8 bpw; 907 + unsigned long flags; 908 + int use_dma; 884 909 885 - INIT_COMPLETION(sdd->xfer_completion); 910 + INIT_COMPLETION(sdd->xfer_completion); 886 911 887 - /* Only BPW and Speed may change across transfers */ 888 - bpw = xfer->bits_per_word; 889 - speed = xfer->speed_hz ? : spi->max_speed_hz; 912 + /* Only BPW and Speed may change across transfers */ 913 + bpw = xfer->bits_per_word; 914 + speed = xfer->speed_hz ? 
: spi->max_speed_hz; 890 915 891 - if (xfer->len % (bpw / 8)) { 892 - dev_err(&spi->dev, 893 - "Xfer length(%u) not a multiple of word size(%u)\n", 894 - xfer->len, bpw / 8); 895 - status = -EIO; 896 - goto out; 916 + if (xfer->len % (bpw / 8)) { 917 + dev_err(&spi->dev, 918 + "Xfer length(%u) not a multiple of word size(%u)\n", 919 + xfer->len, bpw / 8); 920 + return -EIO; 921 + } 922 + 923 + if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { 924 + sdd->cur_bpw = bpw; 925 + sdd->cur_speed = speed; 926 + s3c64xx_spi_config(sdd); 927 + } 928 + 929 + /* Polling method for xfers not bigger than FIFO capacity */ 930 + use_dma = 0; 931 + if (!is_polling(sdd) && 932 + (sdd->rx_dma.ch && sdd->tx_dma.ch && 933 + (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))) 934 + use_dma = 1; 935 + 936 + spin_lock_irqsave(&sdd->lock, flags); 937 + 938 + /* Pending only which is to be done */ 939 + sdd->state &= ~RXBUSY; 940 + sdd->state &= ~TXBUSY; 941 + 942 + enable_datapath(sdd, spi, xfer, use_dma); 943 + 944 + /* Start the signals */ 945 + writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 946 + 947 + /* Start the signals */ 948 + writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 949 + 950 + spin_unlock_irqrestore(&sdd->lock, flags); 951 + 952 + status = wait_for_xfer(sdd, xfer, use_dma); 953 + 954 + if (status) { 955 + dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", 956 + xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, 957 + (sdd->state & RXBUSY) ? 'f' : 'p', 958 + (sdd->state & TXBUSY) ? 
'f' : 'p', 959 + xfer->len); 960 + 961 + if (use_dma) { 962 + if (xfer->tx_buf != NULL 963 + && (sdd->state & TXBUSY)) 964 + s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma); 965 + if (xfer->rx_buf != NULL 966 + && (sdd->state & RXBUSY)) 967 + s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma); 897 968 } 898 - 899 - if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { 900 - sdd->cur_bpw = bpw; 901 - sdd->cur_speed = speed; 902 - s3c64xx_spi_config(sdd); 903 - } 904 - 905 - /* Polling method for xfers not bigger than FIFO capacity */ 906 - use_dma = 0; 907 - if (!is_polling(sdd) && 908 - (sdd->rx_dma.ch && sdd->tx_dma.ch && 909 - (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))) 910 - use_dma = 1; 911 - 912 - spin_lock_irqsave(&sdd->lock, flags); 913 - 914 - /* Pending only which is to be done */ 915 - sdd->state &= ~RXBUSY; 916 - sdd->state &= ~TXBUSY; 917 - 918 - enable_datapath(sdd, spi, xfer, use_dma); 919 - 920 - /* Slave Select */ 921 - enable_cs(sdd, spi); 922 - 923 - spin_unlock_irqrestore(&sdd->lock, flags); 924 - 925 - status = wait_for_xfer(sdd, xfer, use_dma); 926 - 927 - if (status) { 928 - dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", 929 - xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, 930 - (sdd->state & RXBUSY) ? 'f' : 'p', 931 - (sdd->state & TXBUSY) ? 
'f' : 'p', 932 - xfer->len); 933 - 934 - if (use_dma) { 935 - if (xfer->tx_buf != NULL 936 - && (sdd->state & TXBUSY)) 937 - s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma); 938 - if (xfer->rx_buf != NULL 939 - && (sdd->state & RXBUSY)) 940 - s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma); 941 - } 942 - 943 - goto out; 944 - } 945 - 946 - if (xfer->delay_usecs) 947 - udelay(xfer->delay_usecs); 948 - 949 - if (xfer->cs_change) { 950 - /* Hint that the next mssg is gonna be 951 - for the same device */ 952 - if (list_is_last(&xfer->transfer_list, 953 - &msg->transfers)) 954 - cs_toggle = 1; 955 - } 956 - 957 - msg->actual_length += xfer->len; 958 - 969 + } else { 959 970 flush_fifo(sdd); 960 971 } 961 972 962 - out: 963 - if (!cs_toggle || status) 964 - disable_cs(sdd, spi); 965 - else 966 - sdd->tgl_spi = spi; 973 + return status; 974 + } 975 + 976 + static int s3c64xx_spi_unprepare_message(struct spi_master *master, 977 + struct spi_message *msg) 978 + { 979 + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 967 980 968 981 s3c64xx_spi_unmap_mssg(sdd, msg); 969 - 970 - msg->status = status; 971 - 972 - spi_finalize_current_message(master); 973 982 974 983 return 0; 975 984 } ··· 1046 1071 cs->line, err); 1047 1072 goto err_gpio_req; 1048 1073 } 1074 + 1075 + spi->cs_gpio = cs->line; 1049 1076 } 1050 1077 1051 1078 spi_set_ctldata(spi, cs); ··· 1094 1117 } 1095 1118 1096 1119 pm_runtime_put(&sdd->pdev->dev); 1120 + writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 1097 1121 disable_cs(sdd, spi); 1098 1122 return 0; 1099 1123 1100 1124 setup_exit: 1125 + pm_runtime_put(&sdd->pdev->dev); 1101 1126 /* setup() returns with device de-selected */ 1127 + writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 1102 1128 disable_cs(sdd, spi); 1103 1129 1104 1130 gpio_free(cs->line); ··· 1120 1140 struct s3c64xx_spi_driver_data *sdd; 1121 1141 1122 1142 sdd = spi_master_get_devdata(spi->master); 1123 - if (cs && sdd->cs_gpio) { 1124 
- gpio_free(cs->line); 1143 + if (spi->cs_gpio) { 1144 + gpio_free(spi->cs_gpio); 1125 1145 if (spi->dev.of_node) 1126 1146 kfree(cs); 1127 1147 } ··· 1339 1359 master->setup = s3c64xx_spi_setup; 1340 1360 master->cleanup = s3c64xx_spi_cleanup; 1341 1361 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; 1342 - master->transfer_one_message = s3c64xx_spi_transfer_one_message; 1362 + master->prepare_message = s3c64xx_spi_prepare_message; 1363 + master->transfer_one = s3c64xx_spi_transfer_one; 1364 + master->unprepare_message = s3c64xx_spi_unprepare_message; 1343 1365 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; 1344 1366 master->num_chipselect = sci->num_cs; 1345 1367 master->dma_alignment = 8; ··· 1410 1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1411 1429 sdd->regs + S3C64XX_SPI_INT_EN); 1412 1430 1431 + pm_runtime_set_active(&pdev->dev); 1413 1432 pm_runtime_enable(&pdev->dev); 1414 1433 1415 - if (spi_register_master(master)) { 1416 - dev_err(&pdev->dev, "cannot register SPI master\n"); 1417 - ret = -EBUSY; 1434 + ret = devm_spi_register_master(&pdev->dev, master); 1435 + if (ret != 0) { 1436 + dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret); 1418 1437 goto err3; 1419 1438 } 1420 1439 ··· 1444 1461 1445 1462 pm_runtime_disable(&pdev->dev); 1446 1463 1447 - spi_unregister_master(master); 1448 - 1449 1464 writel(0, sdd->regs + S3C64XX_SPI_INT_EN); 1450 1465 1451 1466 clk_disable_unprepare(sdd->src_clk); 1452 1467 1453 1468 clk_disable_unprepare(sdd->clk); 1454 - 1455 - spi_master_put(master); 1456 1469 1457 1470 return 0; 1458 1471 } ··· 1459 1480 struct spi_master *master = dev_get_drvdata(dev); 1460 1481 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1461 1482 1462 - spi_master_suspend(master); 1483 + int ret = spi_master_suspend(master); 1484 + if (ret) 1485 + return ret; 1463 1486 1464 - /* Disable the clock */ 1465 - clk_disable_unprepare(sdd->src_clk); 1466 - 
clk_disable_unprepare(sdd->clk); 1487 + if (!pm_runtime_suspended(dev)) { 1488 + clk_disable_unprepare(sdd->clk); 1489 + clk_disable_unprepare(sdd->src_clk); 1490 + } 1467 1491 1468 1492 sdd->cur_speed = 0; /* Output Clock is stopped */ 1469 1493 ··· 1482 1500 if (sci->cfg_gpio) 1483 1501 sci->cfg_gpio(); 1484 1502 1485 - /* Enable the clock */ 1486 - clk_prepare_enable(sdd->src_clk); 1487 - clk_prepare_enable(sdd->clk); 1503 + if (!pm_runtime_suspended(dev)) { 1504 + clk_prepare_enable(sdd->src_clk); 1505 + clk_prepare_enable(sdd->clk); 1506 + } 1488 1507 1489 1508 s3c64xx_spi_hwinit(sdd, sdd->port_id); 1490 1509 1491 - spi_master_resume(master); 1492 - 1493 - return 0; 1510 + return spi_master_resume(master); 1494 1511 } 1495 1512 #endif /* CONFIG_PM_SLEEP */ 1496 1513 ··· 1509 1528 { 1510 1529 struct spi_master *master = dev_get_drvdata(dev); 1511 1530 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1531 + int ret; 1512 1532 1513 - clk_prepare_enable(sdd->src_clk); 1514 - clk_prepare_enable(sdd->clk); 1533 + ret = clk_prepare_enable(sdd->src_clk); 1534 + if (ret != 0) 1535 + return ret; 1536 + 1537 + ret = clk_prepare_enable(sdd->clk); 1538 + if (ret != 0) { 1539 + clk_disable_unprepare(sdd->src_clk); 1540 + return ret; 1541 + } 1515 1542 1516 1543 return 0; 1517 1544 } ··· 1605 1616 }; 1606 1617 1607 1618 static const struct of_device_id s3c64xx_spi_dt_match[] = { 1619 + { .compatible = "samsung,s3c2443-spi", 1620 + .data = (void *)&s3c2443_spi_port_config, 1621 + }, 1622 + { .compatible = "samsung,s3c6410-spi", 1623 + .data = (void *)&s3c6410_spi_port_config, 1624 + }, 1625 + { .compatible = "samsung,s5pc100-spi", 1626 + .data = (void *)&s5pc100_spi_port_config, 1627 + }, 1628 + { .compatible = "samsung,s5pv210-spi", 1629 + .data = (void *)&s5pv210_spi_port_config, 1630 + }, 1608 1631 { .compatible = "samsung,exynos4210-spi", 1609 1632 .data = (void *)&exynos4_spi_port_config, 1610 1633 }, ··· 1634 1633 .pm = &s3c64xx_spi_pm, 1635 1634 
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match), 1636 1635 }, 1636 + .probe = s3c64xx_spi_probe, 1637 1637 .remove = s3c64xx_spi_remove, 1638 1638 .id_table = s3c64xx_spi_driver_ids, 1639 1639 }; 1640 1640 MODULE_ALIAS("platform:s3c64xx-spi"); 1641 1641 1642 - static int __init s3c64xx_spi_init(void) 1643 - { 1644 - return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1645 - } 1646 - subsys_initcall(s3c64xx_spi_init); 1647 - 1648 - static void __exit s3c64xx_spi_exit(void) 1649 - { 1650 - platform_driver_unregister(&s3c64xx_spi_driver); 1651 - } 1652 - module_exit(s3c64xx_spi_exit); 1642 + module_platform_driver(s3c64xx_spi_driver); 1653 1643 1654 1644 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); 1655 1645 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
+10 -3
drivers/spi/spi-sh-hspi.c
··· 137 137 rate /= 16; 138 138 139 139 /* CLKCx calculation */ 140 - rate /= (((idiv_clk & 0x1F) + 1) * 2) ; 140 + rate /= (((idiv_clk & 0x1F) + 1) * 2); 141 141 142 142 /* save best settings */ 143 143 tmp = abs(target_rate - rate); ··· 303 303 master->setup = hspi_setup; 304 304 master->cleanup = hspi_cleanup; 305 305 master->mode_bits = SPI_CPOL | SPI_CPHA; 306 + master->dev.of_node = pdev->dev.of_node; 306 307 master->auto_runtime_pm = true; 307 308 master->transfer_one_message = hspi_transfer_one_message; 308 - ret = spi_register_master(master); 309 + ret = devm_spi_register_master(&pdev->dev, master); 309 310 if (ret < 0) { 310 311 dev_err(&pdev->dev, "spi_register_master error.\n"); 311 312 goto error1; ··· 329 328 pm_runtime_disable(&pdev->dev); 330 329 331 330 clk_put(hspi->clk); 332 - spi_unregister_master(hspi->master); 333 331 334 332 return 0; 335 333 } 334 + 335 + static struct of_device_id hspi_of_match[] = { 336 + { .compatible = "renesas,hspi", }, 337 + { /* sentinel */ } 338 + }; 339 + MODULE_DEVICE_TABLE(of, hspi_of_match); 336 340 337 341 static struct platform_driver hspi_driver = { 338 342 .probe = hspi_probe, ··· 345 339 .driver = { 346 340 .name = "sh-hspi", 347 341 .owner = THIS_MODULE, 342 + .of_match_table = hspi_of_match, 348 343 }, 349 344 }; 350 345 module_platform_driver(hspi_driver);
+1 -1
drivers/spi/spi-sh-sci.c
··· 133 133 sp->info = dev_get_platdata(&dev->dev); 134 134 135 135 /* setup spi bitbang adaptor */ 136 - sp->bitbang.master = spi_master_get(master); 136 + sp->bitbang.master = master; 137 137 sp->bitbang.master->bus_num = sp->info->bus_num; 138 138 sp->bitbang.master->num_chipselect = sp->info->num_chipselect; 139 139 sp->bitbang.chipselect = sh_sci_spi_chipselect;
+1 -1
drivers/spi/spi-sirf.c
··· 632 632 if (ret) 633 633 goto free_master; 634 634 635 - sspi->bitbang.master = spi_master_get(master); 635 + sspi->bitbang.master = master; 636 636 sspi->bitbang.chipselect = spi_sirfsoc_chipselect; 637 637 sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer; 638 638 sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
+70 -20
drivers/spi/spi-tegra114.c
··· 182 182 u32 cur_speed; 183 183 184 184 struct spi_device *cur_spi; 185 + struct spi_device *cs_control; 185 186 unsigned cur_pos; 186 187 unsigned cur_len; 187 188 unsigned words_per_32bit; ··· 268 267 unsigned max_len; 269 268 unsigned total_fifo_words; 270 269 271 - tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1; 270 + tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8); 272 271 273 272 if (bits_per_word == 8 || bits_per_word == 16) { 274 273 tspi->is_packed = 1; ··· 677 676 dma_release_channel(dma_chan); 678 677 } 679 678 680 - static int tegra_spi_start_transfer_one(struct spi_device *spi, 681 - struct spi_transfer *t, bool is_first_of_msg, 682 - bool is_single_xfer) 679 + static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi, 680 + struct spi_transfer *t, bool is_first_of_msg) 683 681 { 684 682 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); 685 683 u32 speed = t->speed_hz; 686 684 u8 bits_per_word = t->bits_per_word; 687 - unsigned total_fifo_words; 688 - int ret; 689 685 unsigned long command1; 690 686 int req_mode; 691 687 ··· 696 698 tspi->cur_rx_pos = 0; 697 699 tspi->cur_tx_pos = 0; 698 700 tspi->curr_xfer = t; 699 - total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t); 700 701 701 702 if (is_first_of_msg) { 702 703 tegra_spi_clear_status(tspi); ··· 714 717 else if (req_mode == SPI_MODE_3) 715 718 command1 |= SPI_CONTROL_MODE_3; 716 719 717 - tegra_spi_writel(tspi, command1, SPI_COMMAND1); 720 + if (tspi->cs_control) { 721 + if (tspi->cs_control != spi) 722 + tegra_spi_writel(tspi, command1, SPI_COMMAND1); 723 + tspi->cs_control = NULL; 724 + } else 725 + tegra_spi_writel(tspi, command1, SPI_COMMAND1); 718 726 719 727 command1 |= SPI_CS_SW_HW; 720 728 if (spi->mode & SPI_CS_HIGH) ··· 733 731 command1 &= ~SPI_BIT_LENGTH(~0); 734 732 command1 |= SPI_BIT_LENGTH(bits_per_word - 1); 735 733 } 734 + 735 + return command1; 736 + } 737 + 738 + static int tegra_spi_start_transfer_one(struct 
spi_device *spi, 739 + struct spi_transfer *t, unsigned long command1) 740 + { 741 + struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); 742 + unsigned total_fifo_words; 743 + int ret; 744 + 745 + total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t); 736 746 737 747 if (tspi->is_packed) 738 748 command1 |= SPI_PACKED; ··· 817 803 return 0; 818 804 } 819 805 806 + static void tegra_spi_transfer_delay(int delay) 807 + { 808 + if (!delay) 809 + return; 810 + 811 + if (delay >= 1000) 812 + mdelay(delay / 1000); 813 + 814 + udelay(delay % 1000); 815 + } 816 + 820 817 static int tegra_spi_transfer_one_message(struct spi_master *master, 821 818 struct spi_message *msg) 822 819 { 823 820 bool is_first_msg = true; 824 - int single_xfer; 825 821 struct tegra_spi_data *tspi = spi_master_get_devdata(master); 826 822 struct spi_transfer *xfer; 827 823 struct spi_device *spi = msg->spi; 828 824 int ret; 825 + bool skip = false; 829 826 830 827 msg->status = 0; 831 828 msg->actual_length = 0; 832 829 833 - single_xfer = list_is_singular(&msg->transfers); 834 830 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 831 + unsigned long cmd1; 832 + 835 833 INIT_COMPLETION(tspi->xfer_completion); 836 - ret = tegra_spi_start_transfer_one(spi, xfer, 837 - is_first_msg, single_xfer); 834 + 835 + cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg); 836 + 837 + if (!xfer->len) { 838 + ret = 0; 839 + skip = true; 840 + goto complete_xfer; 841 + } 842 + 843 + ret = tegra_spi_start_transfer_one(spi, xfer, cmd1); 838 844 if (ret < 0) { 839 845 dev_err(tspi->dev, 840 846 "spi can not start transfer, err %d\n", ret); 841 - goto exit; 847 + goto complete_xfer; 842 848 } 849 + 843 850 is_first_msg = false; 844 851 ret = wait_for_completion_timeout(&tspi->xfer_completion, 845 852 SPI_DMA_TIMEOUT); ··· 868 833 dev_err(tspi->dev, 869 834 "spi trasfer timeout, err %d\n", ret); 870 835 ret = -EIO; 871 - goto exit; 836 + goto complete_xfer; 872 837 } 873 
838 874 839 if (tspi->tx_status || tspi->rx_status) { 875 840 dev_err(tspi->dev, "Error in Transfer\n"); 876 841 ret = -EIO; 877 - goto exit; 842 + goto complete_xfer; 878 843 } 879 844 msg->actual_length += xfer->len; 880 - if (xfer->cs_change && xfer->delay_usecs) { 845 + 846 + complete_xfer: 847 + if (ret < 0 || skip) { 881 848 tegra_spi_writel(tspi, tspi->def_command1_reg, 882 849 SPI_COMMAND1); 883 - udelay(xfer->delay_usecs); 850 + tegra_spi_transfer_delay(xfer->delay_usecs); 851 + goto exit; 852 + } else if (msg->transfers.prev == &xfer->transfer_list) { 853 + /* This is the last transfer in message */ 854 + if (xfer->cs_change) 855 + tspi->cs_control = spi; 856 + else { 857 + tegra_spi_writel(tspi, tspi->def_command1_reg, 858 + SPI_COMMAND1); 859 + tegra_spi_transfer_delay(xfer->delay_usecs); 860 + } 861 + } else if (xfer->cs_change) { 862 + tegra_spi_writel(tspi, tspi->def_command1_reg, 863 + SPI_COMMAND1); 864 + tegra_spi_transfer_delay(xfer->delay_usecs); 884 865 } 866 + 885 867 } 886 868 ret = 0; 887 869 exit: 888 - tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); 889 870 msg->status = ret; 890 871 spi_finalize_current_message(master); 891 872 return ret; ··· 1166 1115 pm_runtime_put(&pdev->dev); 1167 1116 1168 1117 master->dev.of_node = pdev->dev.of_node; 1169 - ret = spi_register_master(master); 1118 + ret = devm_spi_register_master(&pdev->dev, master); 1170 1119 if (ret < 0) { 1171 1120 dev_err(&pdev->dev, "can not register to master err %d\n", ret); 1172 1121 goto exit_pm_disable; ··· 1193 1142 struct tegra_spi_data *tspi = spi_master_get_devdata(master); 1194 1143 1195 1144 free_irq(tspi->irq, tspi); 1196 - spi_unregister_master(master); 1197 1145 1198 1146 if (tspi->tx_dma_chan) 1199 1147 tegra_spi_deinit_dma_param(tspi, false);
+2 -3
drivers/spi/spi-tegra20-sflash.c
··· 173 173 unsigned remain_len = t->len - tsd->cur_pos; 174 174 unsigned max_word; 175 175 176 - tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1; 176 + tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8); 177 177 max_word = remain_len / tsd->bytes_per_word; 178 178 if (max_word > SPI_FIFO_DEPTH) 179 179 max_word = SPI_FIFO_DEPTH; ··· 529 529 pm_runtime_put(&pdev->dev); 530 530 531 531 master->dev.of_node = pdev->dev.of_node; 532 - ret = spi_register_master(master); 532 + ret = devm_spi_register_master(&pdev->dev, master); 533 533 if (ret < 0) { 534 534 dev_err(&pdev->dev, "can not register to master err %d\n", ret); 535 535 goto exit_pm_disable; ··· 553 553 struct tegra_sflash_data *tsd = spi_master_get_devdata(master); 554 554 555 555 free_irq(tsd->irq, tsd); 556 - spi_unregister_master(master); 557 556 558 557 pm_runtime_disable(&pdev->dev); 559 558 if (!pm_runtime_status_suspended(&pdev->dev))
+70 -76
drivers/spi/spi-tegra20-slink.c
··· 278 278 { 279 279 unsigned remain_len = t->len - tspi->cur_pos; 280 280 unsigned max_word; 281 - unsigned bits_per_word ; 281 + unsigned bits_per_word; 282 282 unsigned max_len; 283 283 unsigned total_fifo_words; 284 284 285 285 bits_per_word = t->bits_per_word; 286 - tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1; 286 + tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8); 287 287 288 288 if (bits_per_word == 8 || bits_per_word == 16) { 289 289 tspi->is_packed = 1; ··· 707 707 } 708 708 709 709 static int tegra_slink_start_transfer_one(struct spi_device *spi, 710 - struct spi_transfer *t, bool is_first_of_msg, 711 - bool is_single_xfer) 710 + struct spi_transfer *t) 712 711 { 713 712 struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master); 714 713 u32 speed; ··· 731 732 tspi->curr_xfer = t; 732 733 total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t); 733 734 734 - if (is_first_of_msg) { 735 - tegra_slink_clear_status(tspi); 735 + command = tspi->command_reg; 736 + command &= ~SLINK_BIT_LENGTH(~0); 737 + command |= SLINK_BIT_LENGTH(bits_per_word - 1); 736 738 737 - command = tspi->def_command_reg; 738 - command |= SLINK_BIT_LENGTH(bits_per_word - 1); 739 - command |= SLINK_CS_SW | SLINK_CS_VALUE; 740 - 741 - command2 = tspi->def_command2_reg; 742 - command2 |= SLINK_SS_EN_CS(spi->chip_select); 743 - 744 - command &= ~SLINK_MODES; 745 - if (spi->mode & SPI_CPHA) 746 - command |= SLINK_CK_SDA; 747 - 748 - if (spi->mode & SPI_CPOL) 749 - command |= SLINK_IDLE_SCLK_DRIVE_HIGH; 750 - else 751 - command |= SLINK_IDLE_SCLK_DRIVE_LOW; 752 - } else { 753 - command = tspi->command_reg; 754 - command &= ~SLINK_BIT_LENGTH(~0); 755 - command |= SLINK_BIT_LENGTH(bits_per_word - 1); 756 - 757 - command2 = tspi->command2_reg; 758 - command2 &= ~(SLINK_RXEN | SLINK_TXEN); 759 - } 739 + command2 = tspi->command2_reg; 740 + command2 &= ~(SLINK_RXEN | SLINK_TXEN); 760 741 761 742 tegra_slink_writel(tspi, command, SLINK_COMMAND); 762 743 
tspi->command_reg = command; ··· 803 824 return 0; 804 825 } 805 826 806 - static int tegra_slink_transfer_one_message(struct spi_master *master, 807 - struct spi_message *msg) 827 + static int tegra_slink_prepare_message(struct spi_master *master, 828 + struct spi_message *msg) 808 829 { 809 - bool is_first_msg = true; 810 - int single_xfer; 811 830 struct tegra_slink_data *tspi = spi_master_get_devdata(master); 812 - struct spi_transfer *xfer; 813 831 struct spi_device *spi = msg->spi; 832 + 833 + tegra_slink_clear_status(tspi); 834 + 835 + tspi->command_reg = tspi->def_command_reg; 836 + tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE; 837 + 838 + tspi->command2_reg = tspi->def_command2_reg; 839 + tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select); 840 + 841 + tspi->command_reg &= ~SLINK_MODES; 842 + if (spi->mode & SPI_CPHA) 843 + tspi->command_reg |= SLINK_CK_SDA; 844 + 845 + if (spi->mode & SPI_CPOL) 846 + tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH; 847 + else 848 + tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW; 849 + 850 + return 0; 851 + } 852 + 853 + static int tegra_slink_transfer_one(struct spi_master *master, 854 + struct spi_device *spi, 855 + struct spi_transfer *xfer) 856 + { 857 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 814 858 int ret; 815 859 816 - msg->status = 0; 817 - msg->actual_length = 0; 818 - 819 - single_xfer = list_is_singular(&msg->transfers); 820 - list_for_each_entry(xfer, &msg->transfers, transfer_list) { 821 - INIT_COMPLETION(tspi->xfer_completion); 822 - ret = tegra_slink_start_transfer_one(spi, xfer, 823 - is_first_msg, single_xfer); 824 - if (ret < 0) { 825 - dev_err(tspi->dev, 826 - "spi can not start transfer, err %d\n", ret); 827 - goto exit; 828 - } 829 - is_first_msg = false; 830 - ret = wait_for_completion_timeout(&tspi->xfer_completion, 831 - SLINK_DMA_TIMEOUT); 832 - if (WARN_ON(ret == 0)) { 833 - dev_err(tspi->dev, 834 - "spi trasfer timeout, err %d\n", ret); 835 - ret = -EIO; 836 - 
goto exit; 837 - } 838 - 839 - if (tspi->tx_status || tspi->rx_status) { 840 - dev_err(tspi->dev, "Error in Transfer\n"); 841 - ret = -EIO; 842 - goto exit; 843 - } 844 - msg->actual_length += xfer->len; 845 - if (xfer->cs_change && xfer->delay_usecs) { 846 - tegra_slink_writel(tspi, tspi->def_command_reg, 847 - SLINK_COMMAND); 848 - udelay(xfer->delay_usecs); 849 - } 860 + INIT_COMPLETION(tspi->xfer_completion); 861 + ret = tegra_slink_start_transfer_one(spi, xfer); 862 + if (ret < 0) { 863 + dev_err(tspi->dev, 864 + "spi can not start transfer, err %d\n", ret); 865 + return ret; 850 866 } 851 - ret = 0; 852 - exit: 867 + 868 + ret = wait_for_completion_timeout(&tspi->xfer_completion, 869 + SLINK_DMA_TIMEOUT); 870 + if (WARN_ON(ret == 0)) { 871 + dev_err(tspi->dev, 872 + "spi trasfer timeout, err %d\n", ret); 873 + return -EIO; 874 + } 875 + 876 + if (tspi->tx_status) 877 + return tspi->tx_status; 878 + if (tspi->rx_status) 879 + return tspi->rx_status; 880 + 881 + return 0; 882 + } 883 + 884 + static int tegra_slink_unprepare_message(struct spi_master *master, 885 + struct spi_message *msg) 886 + { 887 + struct tegra_slink_data *tspi = spi_master_get_devdata(master); 888 + 853 889 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 854 890 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); 855 - msg->status = ret; 856 - spi_finalize_current_message(master); 857 - return ret; 891 + 892 + return 0; 858 893 } 859 894 860 895 static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi) ··· 1071 1078 /* the spi->mode bits understood by this driver: */ 1072 1079 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1073 1080 master->setup = tegra_slink_setup; 1074 - master->transfer_one_message = tegra_slink_transfer_one_message; 1081 + master->prepare_message = tegra_slink_prepare_message; 1082 + master->transfer_one = tegra_slink_transfer_one; 1083 + master->unprepare_message = tegra_slink_unprepare_message; 1075 1084 
master->auto_runtime_pm = true; 1076 1085 master->num_chipselect = MAX_CHIP_SELECT; 1077 1086 master->bus_num = -1; ··· 1159 1164 pm_runtime_put(&pdev->dev); 1160 1165 1161 1166 master->dev.of_node = pdev->dev.of_node; 1162 - ret = spi_register_master(master); 1167 + ret = devm_spi_register_master(&pdev->dev, master); 1163 1168 if (ret < 0) { 1164 1169 dev_err(&pdev->dev, "can not register to master err %d\n", ret); 1165 1170 goto exit_pm_disable; ··· 1186 1191 struct tegra_slink_data *tspi = spi_master_get_devdata(master); 1187 1192 1188 1193 free_irq(tspi->irq, tspi); 1189 - spi_unregister_master(master); 1190 1194 1191 1195 if (tspi->tx_dma_chan) 1192 1196 tegra_slink_deinit_dma_param(tspi, false);
+10 -38
drivers/spi/spi-ti-qspi.c
··· 41 41 struct ti_qspi { 42 42 struct completion transfer_complete; 43 43 44 - /* IRQ synchronization */ 45 - spinlock_t lock; 46 - 47 44 /* list synchronization */ 48 45 struct mutex list_lock; 49 46 ··· 54 57 u32 spi_max_frequency; 55 58 u32 cmd; 56 59 u32 dc; 57 - u32 stat; 58 60 }; 59 61 60 62 #define QSPI_PID (0x0) ··· 393 397 { 394 398 struct ti_qspi *qspi = dev_id; 395 399 u16 int_stat; 400 + u32 stat; 396 401 397 402 irqreturn_t ret = IRQ_HANDLED; 398 403 399 - spin_lock(&qspi->lock); 400 - 401 404 int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR); 402 - qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); 405 + stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); 403 406 404 407 if (!int_stat) { 405 408 dev_dbg(qspi->dev, "No IRQ triggered\n"); ··· 406 411 goto out; 407 412 } 408 413 409 - ret = IRQ_WAKE_THREAD; 410 - 411 - ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); 412 414 ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, 413 415 QSPI_INTR_STATUS_ENABLED_CLEAR); 414 - 415 - out: 416 - spin_unlock(&qspi->lock); 417 - 418 - return ret; 419 - } 420 - 421 - static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id) 422 - { 423 - struct ti_qspi *qspi = dev_id; 424 - unsigned long flags; 425 - 426 - spin_lock_irqsave(&qspi->lock, flags); 427 - 428 - if (qspi->stat & WC) 416 + if (stat & WC) 429 417 complete(&qspi->transfer_complete); 430 - 431 - spin_unlock_irqrestore(&qspi->lock, flags); 432 - 433 - ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG); 434 - 435 - return IRQ_HANDLED; 418 + out: 419 + return ret; 436 420 } 437 421 438 422 static int ti_qspi_runtime_resume(struct device *dev) ··· 446 472 if (!master) 447 473 return -ENOMEM; 448 474 449 - master->mode_bits = SPI_CPOL | SPI_CPHA; 475 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD; 450 476 451 477 master->bus_num = -1; 452 478 master->flags = SPI_MASTER_HALF_DUPLEX; ··· 473 499 return irq; 474 500 } 475 501 476 - 
spin_lock_init(&qspi->lock); 477 502 mutex_init(&qspi->list_lock); 478 503 479 504 qspi->base = devm_ioremap_resource(&pdev->dev, r); ··· 481 508 goto free_master; 482 509 } 483 510 484 - ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr, 485 - ti_qspi_threaded_isr, 0, 511 + ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0, 486 512 dev_name(&pdev->dev), qspi); 487 513 if (ret < 0) { 488 514 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", ··· 504 532 if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) 505 533 qspi->spi_max_frequency = max_freq; 506 534 507 - ret = spi_register_master(master); 535 + ret = devm_spi_register_master(&pdev->dev, master); 508 536 if (ret) 509 537 goto free_master; 510 538 ··· 519 547 { 520 548 struct ti_qspi *qspi = platform_get_drvdata(pdev); 521 549 522 - spi_unregister_master(qspi->master); 550 + ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); 523 551 524 552 return 0; 525 553 } ··· 530 558 531 559 static struct platform_driver ti_qspi_driver = { 532 560 .probe = ti_qspi_probe, 533 - .remove = ti_qspi_remove, 561 + .remove = ti_qspi_remove, 534 562 .driver = { 535 563 .name = "ti,dra7xxx-qspi", 536 564 .owner = THIS_MODULE,
+9 -8
drivers/spi/spi-topcliff-pch.c
··· 506 506 goto err_out; 507 507 } 508 508 509 - dev_dbg(&pspi->dev, "%s Transfer List not empty. " 510 - "Transfer Speed is set.\n", __func__); 509 + dev_dbg(&pspi->dev, 510 + "%s Transfer List not empty. Transfer Speed is set.\n", __func__); 511 511 512 512 spin_lock_irqsave(&data->lock, flags); 513 513 /* validate Tx/Rx buffers and Transfer length */ ··· 526 526 goto err_return_spinlock; 527 527 } 528 528 529 - dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length" 530 - " valid\n", __func__); 529 + dev_dbg(&pspi->dev, 530 + "%s Tx/Rx buffer valid. Transfer length valid\n", 531 + __func__); 531 532 532 533 /* if baud rate has been specified validate the same */ 533 534 if (transfer->speed_hz > PCH_MAX_BAUDRATE) ··· 1182 1181 spin_lock(&data->lock); 1183 1182 /* check if suspend has been initiated;if yes flush queue */ 1184 1183 if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { 1185 - dev_dbg(&data->master->dev, "%s suspend/remove initiated," 1186 - "flushing queue\n", __func__); 1184 + dev_dbg(&data->master->dev, 1185 + "%s suspend/remove initiated, flushing queue\n", __func__); 1187 1186 list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { 1188 1187 pmsg->status = -EIO; 1189 1188 ··· 1411 1410 /* baseaddress + address offset) */ 1412 1411 data->io_base_addr = pci_resource_start(board_dat->pdev, 1) + 1413 1412 PCH_ADDRESS_SIZE * plat_dev->id; 1414 - data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) + 1415 - PCH_ADDRESS_SIZE * plat_dev->id; 1413 + data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0); 1416 1414 if (!data->io_remap_addr) { 1417 1415 dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__); 1418 1416 ret = -ENOMEM; 1419 1417 goto err_pci_iomap; 1420 1418 } 1419 + data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id; 1421 1420 1422 1421 dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n", 1423 1422 plat_dev->id, data->io_remap_addr);
+5 -6
drivers/spi/spi-txx9.c
··· 177 177 | 0x08, 178 178 TXx9_SPCR0); 179 179 180 - list_for_each_entry (t, &m->transfers, transfer_list) { 180 + list_for_each_entry(t, &m->transfers, transfer_list) { 181 181 const void *txbuf = t->tx_buf; 182 182 void *rxbuf = t->rx_buf; 183 183 u32 data; ··· 308 308 m->actual_length = 0; 309 309 310 310 /* check each transfer's parameters */ 311 - list_for_each_entry (t, &m->transfers, transfer_list) { 311 + list_for_each_entry(t, &m->transfers, transfer_list) { 312 312 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; 313 313 u8 bits_per_word = t->bits_per_word; 314 314 ··· 406 406 master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */ 407 407 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); 408 408 409 - ret = spi_register_master(master); 409 + ret = devm_spi_register_master(&dev->dev, master); 410 410 if (ret) 411 411 goto exit; 412 412 return 0; ··· 428 428 struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); 429 429 struct txx9spi *c = spi_master_get_devdata(master); 430 430 431 - spi_unregister_master(master); 432 431 destroy_workqueue(c->workqueue); 433 432 clk_disable(c->clk); 434 433 clk_put(c->clk); 435 - spi_master_put(master); 436 434 return 0; 437 435 } 438 436 ··· 438 440 MODULE_ALIAS("platform:spi_txx9"); 439 441 440 442 static struct platform_driver txx9spi_driver = { 443 + .probe = txx9spi_probe, 441 444 .remove = txx9spi_remove, 442 445 .driver = { 443 446 .name = "spi_txx9", ··· 448 449 449 450 static int __init txx9spi_init(void) 450 451 { 451 - return platform_driver_probe(&txx9spi_driver, txx9spi_probe); 452 + return platform_driver_register(&txx9spi_driver); 452 453 } 453 454 subsys_initcall(txx9spi_init); 454 455
+1 -1
drivers/spi/spi-xilinx.c
··· 372 372 master->mode_bits = SPI_CPOL | SPI_CPHA; 373 373 374 374 xspi = spi_master_get_devdata(master); 375 - xspi->bitbang.master = spi_master_get(master); 375 + xspi->bitbang.master = master; 376 376 xspi->bitbang.chipselect = xilinx_spi_chipselect; 377 377 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; 378 378 xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+201 -43
drivers/spi/spi.c
··· 39 39 #include <linux/ioport.h> 40 40 #include <linux/acpi.h> 41 41 42 + #define CREATE_TRACE_POINTS 43 + #include <trace/events/spi.h> 44 + 42 45 static void spidev_release(struct device *dev) 43 46 { 44 47 struct spi_device *spi = to_spi_device(dev); ··· 61 58 62 59 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 63 60 } 61 + static DEVICE_ATTR_RO(modalias); 64 62 65 - static struct device_attribute spi_dev_attrs[] = { 66 - __ATTR_RO(modalias), 67 - __ATTR_NULL, 63 + static struct attribute *spi_dev_attrs[] = { 64 + &dev_attr_modalias.attr, 65 + NULL, 68 66 }; 67 + ATTRIBUTE_GROUPS(spi_dev); 69 68 70 69 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 71 70 * and the sysfs version makes coldplug work too. ··· 234 229 235 230 struct bus_type spi_bus_type = { 236 231 .name = "spi", 237 - .dev_attrs = spi_dev_attrs, 232 + .dev_groups = spi_dev_groups, 238 233 .match = spi_match_device, 239 234 .uevent = spi_uevent, 240 235 .pm = &spi_pm, ··· 328 323 if (!spi_master_get(master)) 329 324 return NULL; 330 325 331 - spi = kzalloc(sizeof *spi, GFP_KERNEL); 326 + spi = kzalloc(sizeof(*spi), GFP_KERNEL); 332 327 if (!spi) { 333 328 dev_err(dev, "cannot alloc spi_device\n"); 334 329 spi_master_put(master); ··· 528 523 529 524 /*-------------------------------------------------------------------------*/ 530 525 526 + static void spi_set_cs(struct spi_device *spi, bool enable) 527 + { 528 + if (spi->mode & SPI_CS_HIGH) 529 + enable = !enable; 530 + 531 + if (spi->cs_gpio >= 0) 532 + gpio_set_value(spi->cs_gpio, !enable); 533 + else if (spi->master->set_cs) 534 + spi->master->set_cs(spi, !enable); 535 + } 536 + 537 + /* 538 + * spi_transfer_one_message - Default implementation of transfer_one_message() 539 + * 540 + * This is a standard implementation of transfer_one_message() for 541 + * drivers which impelment a transfer_one() operation. It provides 542 + * standard handling of delays and chip select management. 
543 + */ 544 + static int spi_transfer_one_message(struct spi_master *master, 545 + struct spi_message *msg) 546 + { 547 + struct spi_transfer *xfer; 548 + bool cur_cs = true; 549 + bool keep_cs = false; 550 + int ret = 0; 551 + 552 + spi_set_cs(msg->spi, true); 553 + 554 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 555 + trace_spi_transfer_start(msg, xfer); 556 + 557 + INIT_COMPLETION(master->xfer_completion); 558 + 559 + ret = master->transfer_one(master, msg->spi, xfer); 560 + if (ret < 0) { 561 + dev_err(&msg->spi->dev, 562 + "SPI transfer failed: %d\n", ret); 563 + goto out; 564 + } 565 + 566 + if (ret > 0) 567 + wait_for_completion(&master->xfer_completion); 568 + 569 + trace_spi_transfer_stop(msg, xfer); 570 + 571 + if (msg->status != -EINPROGRESS) 572 + goto out; 573 + 574 + if (xfer->delay_usecs) 575 + udelay(xfer->delay_usecs); 576 + 577 + if (xfer->cs_change) { 578 + if (list_is_last(&xfer->transfer_list, 579 + &msg->transfers)) { 580 + keep_cs = true; 581 + } else { 582 + cur_cs = !cur_cs; 583 + spi_set_cs(msg->spi, cur_cs); 584 + } 585 + } 586 + 587 + msg->actual_length += xfer->len; 588 + } 589 + 590 + out: 591 + if (ret != 0 || !keep_cs) 592 + spi_set_cs(msg->spi, false); 593 + 594 + if (msg->status == -EINPROGRESS) 595 + msg->status = ret; 596 + 597 + spi_finalize_current_message(master); 598 + 599 + return ret; 600 + } 601 + 602 + /** 603 + * spi_finalize_current_transfer - report completion of a transfer 604 + * 605 + * Called by SPI drivers using the core transfer_one_message() 606 + * implementation to notify it that the current interrupt driven 607 + * transfer has finised and the next one may be scheduled. 
608 + */ 609 + void spi_finalize_current_transfer(struct spi_master *master) 610 + { 611 + complete(&master->xfer_completion); 612 + } 613 + EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 614 + 531 615 /** 532 616 * spi_pump_messages - kthread work function which processes spi message queue 533 617 * @work: pointer to kthread work struct contained in the master struct ··· 651 557 pm_runtime_mark_last_busy(master->dev.parent); 652 558 pm_runtime_put_autosuspend(master->dev.parent); 653 559 } 560 + trace_spi_master_idle(master); 654 561 return; 655 562 } 656 563 ··· 680 585 } 681 586 } 682 587 588 + if (!was_busy) 589 + trace_spi_master_busy(master); 590 + 683 591 if (!was_busy && master->prepare_transfer_hardware) { 684 592 ret = master->prepare_transfer_hardware(master); 685 593 if (ret) { ··· 693 595 pm_runtime_put(master->dev.parent); 694 596 return; 695 597 } 598 + } 599 + 600 + trace_spi_message_start(master->cur_msg); 601 + 602 + if (master->prepare_message) { 603 + ret = master->prepare_message(master, master->cur_msg); 604 + if (ret) { 605 + dev_err(&master->dev, 606 + "failed to prepare message: %d\n", ret); 607 + master->cur_msg->status = ret; 608 + spi_finalize_current_message(master); 609 + return; 610 + } 611 + master->cur_msg_prepared = true; 696 612 } 697 613 698 614 ret = master->transfer_one_message(master, master->cur_msg); ··· 790 678 { 791 679 struct spi_message *mesg; 792 680 unsigned long flags; 681 + int ret; 793 682 794 683 spin_lock_irqsave(&master->queue_lock, flags); 795 684 mesg = master->cur_msg; ··· 799 686 queue_kthread_work(&master->kworker, &master->pump_messages); 800 687 spin_unlock_irqrestore(&master->queue_lock, flags); 801 688 689 + if (master->cur_msg_prepared && master->unprepare_message) { 690 + ret = master->unprepare_message(master, mesg); 691 + if (ret) { 692 + dev_err(&master->dev, 693 + "failed to unprepare message: %d\n", ret); 694 + } 695 + } 696 + master->cur_msg_prepared = false; 697 + 802 698 mesg->state = NULL; 
803 699 if (mesg->complete) 804 700 mesg->complete(mesg->context); 701 + 702 + trace_spi_message_done(mesg); 805 703 } 806 704 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 807 705 ··· 927 803 928 804 master->queued = true; 929 805 master->transfer = spi_queued_transfer; 806 + if (!master->transfer_one_message) 807 + master->transfer_one_message = spi_transfer_one_message; 930 808 931 809 /* Initialize and start queue */ 932 810 ret = spi_init_queue(master); ··· 964 838 { 965 839 struct spi_device *spi; 966 840 struct device_node *nc; 967 - const __be32 *prop; 968 - char modalias[SPI_NAME_SIZE + 4]; 969 841 int rc; 970 - int len; 842 + u32 value; 971 843 972 844 if (!master->dev.of_node) 973 845 return; ··· 990 866 } 991 867 992 868 /* Device address */ 993 - prop = of_get_property(nc, "reg", &len); 994 - if (!prop || len < sizeof(*prop)) { 995 - dev_err(&master->dev, "%s has no 'reg' property\n", 996 - nc->full_name); 869 + rc = of_property_read_u32(nc, "reg", &value); 870 + if (rc) { 871 + dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 872 + nc->full_name, rc); 997 873 spi_dev_put(spi); 998 874 continue; 999 875 } 1000 - spi->chip_select = be32_to_cpup(prop); 876 + spi->chip_select = value; 1001 877 1002 878 /* Mode (clock phase/polarity/etc.) 
*/ 1003 879 if (of_find_property(nc, "spi-cpha", NULL)) ··· 1010 886 spi->mode |= SPI_3WIRE; 1011 887 1012 888 /* Device DUAL/QUAD mode */ 1013 - prop = of_get_property(nc, "spi-tx-bus-width", &len); 1014 - if (prop && len == sizeof(*prop)) { 1015 - switch (be32_to_cpup(prop)) { 1016 - case SPI_NBITS_SINGLE: 889 + if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 890 + switch (value) { 891 + case 1: 1017 892 break; 1018 - case SPI_NBITS_DUAL: 893 + case 2: 1019 894 spi->mode |= SPI_TX_DUAL; 1020 895 break; 1021 - case SPI_NBITS_QUAD: 896 + case 4: 1022 897 spi->mode |= SPI_TX_QUAD; 1023 898 break; 1024 899 default: 1025 900 dev_err(&master->dev, 1026 901 "spi-tx-bus-width %d not supported\n", 1027 - be32_to_cpup(prop)); 902 + value); 1028 903 spi_dev_put(spi); 1029 904 continue; 1030 905 } 1031 906 } 1032 907 1033 - prop = of_get_property(nc, "spi-rx-bus-width", &len); 1034 - if (prop && len == sizeof(*prop)) { 1035 - switch (be32_to_cpup(prop)) { 1036 - case SPI_NBITS_SINGLE: 908 + if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 909 + switch (value) { 910 + case 1: 1037 911 break; 1038 - case SPI_NBITS_DUAL: 912 + case 2: 1039 913 spi->mode |= SPI_RX_DUAL; 1040 914 break; 1041 - case SPI_NBITS_QUAD: 915 + case 4: 1042 916 spi->mode |= SPI_RX_QUAD; 1043 917 break; 1044 918 default: 1045 919 dev_err(&master->dev, 1046 920 "spi-rx-bus-width %d not supported\n", 1047 - be32_to_cpup(prop)); 921 + value); 1048 922 spi_dev_put(spi); 1049 923 continue; 1050 924 } 1051 925 } 1052 926 1053 927 /* Device speed */ 1054 - prop = of_get_property(nc, "spi-max-frequency", &len); 1055 - if (!prop || len < sizeof(*prop)) { 1056 - dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n", 1057 - nc->full_name); 928 + rc = of_property_read_u32(nc, "spi-max-frequency", &value); 929 + if (rc) { 930 + dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 931 + nc->full_name, rc); 1058 932 spi_dev_put(spi); 1059 933 continue; 1060 934 
} 1061 - spi->max_speed_hz = be32_to_cpup(prop); 935 + spi->max_speed_hz = value; 1062 936 1063 937 /* IRQ */ 1064 938 spi->irq = irq_of_parse_and_map(nc, 0); ··· 1066 944 spi->dev.of_node = nc; 1067 945 1068 946 /* Register the new device */ 1069 - snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX, 1070 - spi->modalias); 1071 - request_module(modalias); 947 + request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias); 1072 948 rc = spi_add_device(spi); 1073 949 if (rc) { 1074 950 dev_err(&master->dev, "spi_device register error %s\n", ··· 1145 1025 return AE_OK; 1146 1026 } 1147 1027 1148 - strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias)); 1028 + strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 1149 1029 if (spi_add_device(spi)) { 1150 1030 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1151 1031 dev_name(&adev->dev)); ··· 1217 1097 if (!dev) 1218 1098 return NULL; 1219 1099 1220 - master = kzalloc(size + sizeof *master, GFP_KERNEL); 1100 + master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1221 1101 if (!master) 1222 1102 return NULL; 1223 1103 ··· 1242 1122 return 0; 1243 1123 1244 1124 nb = of_gpio_named_count(np, "cs-gpios"); 1245 - master->num_chipselect = max(nb, (int)master->num_chipselect); 1125 + master->num_chipselect = max_t(int, nb, master->num_chipselect); 1246 1126 1247 1127 /* Return error only for an incorrectly formed cs-gpios property */ 1248 1128 if (nb == 0 || nb == -ENOENT) ··· 1329 1209 spin_lock_init(&master->bus_lock_spinlock); 1330 1210 mutex_init(&master->bus_lock_mutex); 1331 1211 master->bus_lock_flag = 0; 1212 + init_completion(&master->xfer_completion); 1332 1213 1333 1214 /* register the device, then userspace will see it. 1334 1215 * registration fails if the bus ID is in use. 
··· 1365 1244 return status; 1366 1245 } 1367 1246 EXPORT_SYMBOL_GPL(spi_register_master); 1247 + 1248 + static void devm_spi_unregister(struct device *dev, void *res) 1249 + { 1250 + spi_unregister_master(*(struct spi_master **)res); 1251 + } 1252 + 1253 + /** 1254 + * dev_spi_register_master - register managed SPI master controller 1255 + * @dev: device managing SPI master 1256 + * @master: initialized master, originally from spi_alloc_master() 1257 + * Context: can sleep 1258 + * 1259 + * Register a SPI device as with spi_register_master() which will 1260 + * automatically be unregister 1261 + */ 1262 + int devm_spi_register_master(struct device *dev, struct spi_master *master) 1263 + { 1264 + struct spi_master **ptr; 1265 + int ret; 1266 + 1267 + ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1268 + if (!ptr) 1269 + return -ENOMEM; 1270 + 1271 + ret = spi_register_master(master); 1272 + if (ret != 0) { 1273 + *ptr = master; 1274 + devres_add(dev, ptr); 1275 + } else { 1276 + devres_free(ptr); 1277 + } 1278 + 1279 + return ret; 1280 + } 1281 + EXPORT_SYMBOL_GPL(devm_spi_register_master); 1368 1282 1369 1283 static int __unregister(struct device *dev, void *null) 1370 1284 { ··· 1558 1402 if (spi->master->setup) 1559 1403 status = spi->master->setup(spi); 1560 1404 1561 - dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" 1562 - "%u bits/w, %u Hz max --> %d\n", 1405 + dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 1563 1406 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 1564 1407 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 1565 1408 (spi->mode & SPI_LSB_FIRST) ? 
"lsb, " : "", ··· 1575 1420 { 1576 1421 struct spi_master *master = spi->master; 1577 1422 struct spi_transfer *xfer; 1423 + 1424 + message->spi = spi; 1425 + 1426 + trace_spi_message_submit(message); 1578 1427 1579 1428 if (list_empty(&message->transfers)) 1580 1429 return -EINVAL; ··· 1679 1520 } 1680 1521 } 1681 1522 1682 - message->spi = spi; 1683 1523 message->status = -EINPROGRESS; 1684 1524 return master->transfer(spi, message); 1685 1525 } ··· 1920 1762 EXPORT_SYMBOL_GPL(spi_bus_unlock); 1921 1763 1922 1764 /* portable code must never pass more than 32 bytes */ 1923 - #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) 1765 + #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 1924 1766 1925 1767 static u8 *buf; 1926 1768 ··· 1969 1811 } 1970 1812 1971 1813 spi_message_init(&message); 1972 - memset(x, 0, sizeof x); 1814 + memset(x, 0, sizeof(x)); 1973 1815 if (n_tx) { 1974 1816 x[0].len = n_tx; 1975 1817 spi_message_add_tail(&x[0], &message);
+3 -4
drivers/spi/spidev.c
··· 37 37 #include <linux/spi/spi.h> 38 38 #include <linux/spi/spidev.h> 39 39 40 - #include <asm/uaccess.h> 40 + #include <linux/uaccess.h> 41 41 42 42 43 43 /* ··· 206 206 207 207 mutex_lock(&spidev->buf_lock); 208 208 missing = copy_from_user(spidev->buffer, buf, count); 209 - if (missing == 0) { 209 + if (missing == 0) 210 210 status = spidev_sync_write(spidev, count); 211 - } else 211 + else 212 212 status = -EFAULT; 213 213 mutex_unlock(&spidev->buf_lock); 214 214 ··· 629 629 /* make sure ops on existing fds can abort cleanly */ 630 630 spin_lock_irq(&spidev->spi_lock); 631 631 spidev->spi = NULL; 632 - spi_set_drvdata(spi, NULL); 633 632 spin_unlock_irq(&spidev->spi_lock); 634 633 635 634 /* prevent new opens */
+1 -2
drivers/staging/iio/meter/ade7753.c
··· 86 86 struct ade7753_state *st = iio_priv(indio_dev); 87 87 ssize_t ret; 88 88 89 - ret = spi_w8r16(st->us, ADE7753_READ_REG(reg_address)); 89 + ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address)); 90 90 if (ret < 0) { 91 91 dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", 92 92 reg_address); ··· 94 94 } 95 95 96 96 *val = ret; 97 - *val = be16_to_cpup(val); 98 97 99 98 return 0; 100 99 }
+1 -2
drivers/staging/iio/meter/ade7754.c
··· 86 86 struct ade7754_state *st = iio_priv(indio_dev); 87 87 int ret; 88 88 89 - ret = spi_w8r16(st->us, ADE7754_READ_REG(reg_address)); 89 + ret = spi_w8r16be(st->us, ADE7754_READ_REG(reg_address)); 90 90 if (ret < 0) { 91 91 dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", 92 92 reg_address); ··· 94 94 } 95 95 96 96 *val = ret; 97 - *val = be16_to_cpup(val); 98 97 99 98 return 0; 100 99 }
+1 -2
drivers/staging/iio/meter/ade7759.c
··· 86 86 struct ade7759_state *st = iio_priv(indio_dev); 87 87 int ret; 88 88 89 - ret = spi_w8r16(st->us, ADE7759_READ_REG(reg_address)); 89 + ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address)); 90 90 if (ret < 0) { 91 91 dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", 92 92 reg_address); ··· 94 94 } 95 95 96 96 *val = ret; 97 - *val = be16_to_cpup(val); 98 97 99 98 return 0; 100 99 }
+2
include/linux/spi/rspi.h
··· 26 26 unsigned int dma_rx_id; 27 27 28 28 unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */ 29 + 30 + u16 num_chipselect; 29 31 }; 30 32 31 33 #endif
+59 -2
include/linux/spi/spi.h
··· 23 23 #include <linux/mod_devicetable.h> 24 24 #include <linux/slab.h> 25 25 #include <linux/kthread.h> 26 + #include <linux/completion.h> 26 27 27 28 /* 28 29 * INTERFACES between SPI master-side drivers and SPI infrastructure. ··· 151 150 } 152 151 153 152 struct spi_message; 154 - 155 - 153 + struct spi_transfer; 156 154 157 155 /** 158 156 * struct spi_driver - Host side "protocol" driver ··· 257 257 * @queue_lock: spinlock to syncronise access to message queue 258 258 * @queue: message queue 259 259 * @cur_msg: the currently in-flight message 260 + * @cur_msg_prepared: spi_prepare_message was called for the currently 261 + * in-flight message 262 + * @xfer_completion: used by core tranfer_one_message() 260 263 * @busy: message pump is busy 261 264 * @running: message pump is running 262 265 * @rt: whether this queue is set to run as a realtime task ··· 277 274 * @unprepare_transfer_hardware: there are currently no more messages on the 278 275 * queue so the subsystem notifies the driver that it may relax the 279 276 * hardware by issuing this call 277 + * @set_cs: assert or deassert chip select, true to assert. May be called 278 + * from interrupt context. 279 + * @prepare_message: set up the controller to transfer a single message, 280 + * for example doing DMA mapping. Called from threaded 281 + * context. 282 + * @transfer_one: transfer a single spi_transfer. When the 283 + * driver is finished with this transfer it must call 284 + * spi_finalize_current_transfer() so the subsystem can issue 285 + * the next transfer 286 + * @unprepare_message: undo any work done by prepare_message(). 280 287 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 281 288 * number. Any individual value may be -ENOENT for CS lines that 282 289 * are not GPIOs (driven by the SPI controller itself). 
··· 401 388 bool running; 402 389 bool rt; 403 390 bool auto_runtime_pm; 391 + bool cur_msg_prepared; 392 + struct completion xfer_completion; 404 393 405 394 int (*prepare_transfer_hardware)(struct spi_master *master); 406 395 int (*transfer_one_message)(struct spi_master *master, 407 396 struct spi_message *mesg); 408 397 int (*unprepare_transfer_hardware)(struct spi_master *master); 398 + int (*prepare_message)(struct spi_master *master, 399 + struct spi_message *message); 400 + int (*unprepare_message)(struct spi_master *master, 401 + struct spi_message *message); 402 + 403 + /* 404 + * These hooks are for drivers that use a generic implementation 405 + * of transfer_one_message() provied by the core. 406 + */ 407 + void (*set_cs)(struct spi_device *spi, bool enable); 408 + int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 409 + struct spi_transfer *transfer); 409 410 410 411 /* gpio chip select */ 411 412 int *cs_gpios; ··· 455 428 /* Calls the driver make to interact with the message queue */ 456 429 extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); 457 430 extern void spi_finalize_current_message(struct spi_master *master); 431 + extern void spi_finalize_current_transfer(struct spi_master *master); 458 432 459 433 /* the spi driver core manages memory for the spi_master classdev */ 460 434 extern struct spi_master * 461 435 spi_alloc_master(struct device *host, unsigned size); 462 436 463 437 extern int spi_register_master(struct spi_master *master); 438 + extern int devm_spi_register_master(struct device *dev, 439 + struct spi_master *master); 464 440 extern void spi_unregister_master(struct spi_master *master); 465 441 466 442 extern struct spi_master *spi_busnum_to_master(u16 busnum); ··· 851 821 852 822 /* return negative errno or unsigned value */ 853 823 return (status < 0) ? 
status : result; 824 + } 825 + 826 + /** 827 + * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read 828 + * @spi: device with which data will be exchanged 829 + * @cmd: command to be written before data is read back 830 + * Context: can sleep 831 + * 832 + * This returns the (unsigned) sixteen bit number returned by the device in cpu 833 + * endianness, or else a negative error code. Callable only from contexts that 834 + * can sleep. 835 + * 836 + * This function is similar to spi_w8r16, with the exception that it will 837 + * convert the read 16 bit data word from big-endian to native endianness. 838 + * 839 + */ 840 + static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) 841 + 842 + { 843 + ssize_t status; 844 + __be16 result; 845 + 846 + status = spi_write_then_read(spi, &cmd, 1, &result, 2); 847 + if (status < 0) 848 + return status; 849 + 850 + return be16_to_cpu(result); 854 851 } 855 852 856 853 /*---------------------------------------------------------------------------*/
+156
include/trace/events/spi.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM spi 3 + 4 + #if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_SPI_H 6 + 7 + #include <linux/ktime.h> 8 + #include <linux/tracepoint.h> 9 + 10 + DECLARE_EVENT_CLASS(spi_master, 11 + 12 + TP_PROTO(struct spi_master *master), 13 + 14 + TP_ARGS(master), 15 + 16 + TP_STRUCT__entry( 17 + __field( int, bus_num ) 18 + ), 19 + 20 + TP_fast_assign( 21 + __entry->bus_num = master->bus_num; 22 + ), 23 + 24 + TP_printk("spi%d", (int)__entry->bus_num) 25 + 26 + ); 27 + 28 + DEFINE_EVENT(spi_master, spi_master_idle, 29 + 30 + TP_PROTO(struct spi_master *master), 31 + 32 + TP_ARGS(master) 33 + 34 + ); 35 + 36 + DEFINE_EVENT(spi_master, spi_master_busy, 37 + 38 + TP_PROTO(struct spi_master *master), 39 + 40 + TP_ARGS(master) 41 + 42 + ); 43 + 44 + DECLARE_EVENT_CLASS(spi_message, 45 + 46 + TP_PROTO(struct spi_message *msg), 47 + 48 + TP_ARGS(msg), 49 + 50 + TP_STRUCT__entry( 51 + __field( int, bus_num ) 52 + __field( int, chip_select ) 53 + __field( struct spi_message *, msg ) 54 + ), 55 + 56 + TP_fast_assign( 57 + __entry->bus_num = msg->spi->master->bus_num; 58 + __entry->chip_select = msg->spi->chip_select; 59 + __entry->msg = msg; 60 + ), 61 + 62 + TP_printk("spi%d.%d %p", (int)__entry->bus_num, 63 + (int)__entry->chip_select, 64 + (struct spi_message *)__entry->msg) 65 + ); 66 + 67 + DEFINE_EVENT(spi_message, spi_message_submit, 68 + 69 + TP_PROTO(struct spi_message *msg), 70 + 71 + TP_ARGS(msg) 72 + 73 + ); 74 + 75 + DEFINE_EVENT(spi_message, spi_message_start, 76 + 77 + TP_PROTO(struct spi_message *msg), 78 + 79 + TP_ARGS(msg) 80 + 81 + ); 82 + 83 + TRACE_EVENT(spi_message_done, 84 + 85 + TP_PROTO(struct spi_message *msg), 86 + 87 + TP_ARGS(msg), 88 + 89 + TP_STRUCT__entry( 90 + __field( int, bus_num ) 91 + __field( int, chip_select ) 92 + __field( struct spi_message *, msg ) 93 + __field( unsigned, frame ) 94 + __field( unsigned, actual ) 95 + ), 96 + 97 + TP_fast_assign( 98 + 
__entry->bus_num = msg->spi->master->bus_num; 99 + __entry->chip_select = msg->spi->chip_select; 100 + __entry->msg = msg; 101 + __entry->frame = msg->frame_length; 102 + __entry->actual = msg->actual_length; 103 + ), 104 + 105 + TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num, 106 + (int)__entry->chip_select, 107 + (struct spi_message *)__entry->msg, 108 + (unsigned)__entry->actual, (unsigned)__entry->frame) 109 + ); 110 + 111 + DECLARE_EVENT_CLASS(spi_transfer, 112 + 113 + TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), 114 + 115 + TP_ARGS(msg, xfer), 116 + 117 + TP_STRUCT__entry( 118 + __field( int, bus_num ) 119 + __field( int, chip_select ) 120 + __field( struct spi_transfer *, xfer ) 121 + __field( int, len ) 122 + ), 123 + 124 + TP_fast_assign( 125 + __entry->bus_num = msg->spi->master->bus_num; 126 + __entry->chip_select = msg->spi->chip_select; 127 + __entry->xfer = xfer; 128 + __entry->len = xfer->len; 129 + ), 130 + 131 + TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num, 132 + (int)__entry->chip_select, 133 + (struct spi_message *)__entry->xfer, 134 + (int)__entry->len) 135 + ); 136 + 137 + DEFINE_EVENT(spi_transfer, spi_transfer_start, 138 + 139 + TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), 140 + 141 + TP_ARGS(msg, xfer) 142 + 143 + ); 144 + 145 + DEFINE_EVENT(spi_transfer, spi_transfer_stop, 146 + 147 + TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), 148 + 149 + TP_ARGS(msg, xfer) 150 + 151 + ); 152 + 153 + #endif /* _TRACE_POWER_H */ 154 + 155 + /* This part must be outside protection */ 156 + #include <trace/define_trace.h>