Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge remote-tracking branches 'spi/topic/armada', 'spi/topic/ath79', 'spi/topic/atmel' and 'spi/topic/axi' into spi-next

+1064 -178
+25
Documentation/devicetree/bindings/spi/spi-armada-3700.txt
··· 1 + * Marvell Armada 3700 SPI Controller 2 + 3 + Required Properties: 4 + 5 + - compatible: should be "marvell,armada-3700-spi" 6 + - reg: physical base address of the controller and length of memory mapped 7 + region. 8 + - interrupts: The interrupt number. The interrupt specifier format depends on 9 + the interrupt controller and its driver. 10 + - clocks: Must contain the clock source, usually from the North Bridge clocks. 11 + - num-cs: The number of chip selects that are supported by this SPI Controller 12 + - #address-cells: should be 1. 13 + - #size-cells: should be 0. 14 + 15 + Example: 16 + 17 + spi0: spi@10600 { 18 + compatible = "marvell,armada-3700-spi"; 19 + #address-cells = <1>; 20 + #size-cells = <0>; 21 + reg = <0x10600 0x5d>; 22 + clocks = <&nb_perih_clk 7>; 23 + interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>; 24 + num-cs = <4>; 25 + };
+7
drivers/spi/Kconfig
··· 67 67 This enables support for the SPI controller present on the 68 68 Atheros AR71XX/AR724X/AR913X SoCs. 69 69 70 + config SPI_ARMADA_3700 71 + tristate "Marvell Armada 3700 SPI Controller" 72 + depends on (ARCH_MVEBU && OF) || COMPILE_TEST 73 + help 74 + This enables support for the SPI controller present on the 75 + Marvell Armada 3700 SoCs. 76 + 70 77 config SPI_ATMEL 71 78 tristate "Atmel SPI Controller" 72 79 depends on HAS_DMA
+1
drivers/spi/Makefile
··· 12 12 13 13 # SPI master controller drivers (bus) 14 14 obj-$(CONFIG_SPI_ALTERA) += spi-altera.o 15 + obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o 15 16 obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o 16 17 obj-$(CONFIG_SPI_ATH79) += spi-ath79.o 17 18 obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+923
drivers/spi/spi-armada-3700.c
··· 1 + /* 2 + * Marvell Armada-3700 SPI controller driver 3 + * 4 + * Copyright (C) 2016 Marvell Ltd. 5 + * 6 + * Author: Wilson Ding <dingwei@marvell.com> 7 + * Author: Romain Perier <romain.perier@free-electrons.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/clk.h> 15 + #include <linux/completion.h> 16 + #include <linux/delay.h> 17 + #include <linux/err.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/io.h> 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/of.h> 23 + #include <linux/of_irq.h> 24 + #include <linux/of_device.h> 25 + #include <linux/pinctrl/consumer.h> 26 + #include <linux/spi/spi.h> 27 + 28 + #define DRIVER_NAME "armada_3700_spi" 29 + 30 + #define A3700_SPI_TIMEOUT 10 31 + 32 + /* SPI Register Offest */ 33 + #define A3700_SPI_IF_CTRL_REG 0x00 34 + #define A3700_SPI_IF_CFG_REG 0x04 35 + #define A3700_SPI_DATA_OUT_REG 0x08 36 + #define A3700_SPI_DATA_IN_REG 0x0C 37 + #define A3700_SPI_IF_INST_REG 0x10 38 + #define A3700_SPI_IF_ADDR_REG 0x14 39 + #define A3700_SPI_IF_RMODE_REG 0x18 40 + #define A3700_SPI_IF_HDR_CNT_REG 0x1C 41 + #define A3700_SPI_IF_DIN_CNT_REG 0x20 42 + #define A3700_SPI_IF_TIME_REG 0x24 43 + #define A3700_SPI_INT_STAT_REG 0x28 44 + #define A3700_SPI_INT_MASK_REG 0x2C 45 + 46 + /* A3700_SPI_IF_CTRL_REG */ 47 + #define A3700_SPI_EN BIT(16) 48 + #define A3700_SPI_ADDR_NOT_CONFIG BIT(12) 49 + #define A3700_SPI_WFIFO_OVERFLOW BIT(11) 50 + #define A3700_SPI_WFIFO_UNDERFLOW BIT(10) 51 + #define A3700_SPI_RFIFO_OVERFLOW BIT(9) 52 + #define A3700_SPI_RFIFO_UNDERFLOW BIT(8) 53 + #define A3700_SPI_WFIFO_FULL BIT(7) 54 + #define A3700_SPI_WFIFO_EMPTY BIT(6) 55 + #define A3700_SPI_RFIFO_FULL BIT(5) 56 + #define A3700_SPI_RFIFO_EMPTY BIT(4) 57 + #define A3700_SPI_WFIFO_RDY BIT(3) 58 + #define A3700_SPI_RFIFO_RDY 
BIT(2) 59 + #define A3700_SPI_XFER_RDY BIT(1) 60 + #define A3700_SPI_XFER_DONE BIT(0) 61 + 62 + /* A3700_SPI_IF_CFG_REG */ 63 + #define A3700_SPI_WFIFO_THRS BIT(28) 64 + #define A3700_SPI_RFIFO_THRS BIT(24) 65 + #define A3700_SPI_AUTO_CS BIT(20) 66 + #define A3700_SPI_DMA_RD_EN BIT(18) 67 + #define A3700_SPI_FIFO_MODE BIT(17) 68 + #define A3700_SPI_SRST BIT(16) 69 + #define A3700_SPI_XFER_START BIT(15) 70 + #define A3700_SPI_XFER_STOP BIT(14) 71 + #define A3700_SPI_INST_PIN BIT(13) 72 + #define A3700_SPI_ADDR_PIN BIT(12) 73 + #define A3700_SPI_DATA_PIN1 BIT(11) 74 + #define A3700_SPI_DATA_PIN0 BIT(10) 75 + #define A3700_SPI_FIFO_FLUSH BIT(9) 76 + #define A3700_SPI_RW_EN BIT(8) 77 + #define A3700_SPI_CLK_POL BIT(7) 78 + #define A3700_SPI_CLK_PHA BIT(6) 79 + #define A3700_SPI_BYTE_LEN BIT(5) 80 + #define A3700_SPI_CLK_PRESCALE BIT(0) 81 + #define A3700_SPI_CLK_PRESCALE_MASK (0x1f) 82 + 83 + #define A3700_SPI_WFIFO_THRS_BIT 28 84 + #define A3700_SPI_RFIFO_THRS_BIT 24 85 + #define A3700_SPI_FIFO_THRS_MASK 0x7 86 + 87 + #define A3700_SPI_DATA_PIN_MASK 0x3 88 + 89 + /* A3700_SPI_IF_HDR_CNT_REG */ 90 + #define A3700_SPI_DUMMY_CNT_BIT 12 91 + #define A3700_SPI_DUMMY_CNT_MASK 0x7 92 + #define A3700_SPI_RMODE_CNT_BIT 8 93 + #define A3700_SPI_RMODE_CNT_MASK 0x3 94 + #define A3700_SPI_ADDR_CNT_BIT 4 95 + #define A3700_SPI_ADDR_CNT_MASK 0x7 96 + #define A3700_SPI_INSTR_CNT_BIT 0 97 + #define A3700_SPI_INSTR_CNT_MASK 0x3 98 + 99 + /* A3700_SPI_IF_TIME_REG */ 100 + #define A3700_SPI_CLK_CAPT_EDGE BIT(7) 101 + 102 + /* Flags and macros for struct a3700_spi */ 103 + #define A3700_INSTR_CNT 1 104 + #define A3700_ADDR_CNT 3 105 + #define A3700_DUMMY_CNT 1 106 + 107 + struct a3700_spi { 108 + struct spi_master *master; 109 + void __iomem *base; 110 + struct clk *clk; 111 + unsigned int irq; 112 + unsigned int flags; 113 + bool xmit_data; 114 + const u8 *tx_buf; 115 + u8 *rx_buf; 116 + size_t buf_len; 117 + u8 byte_len; 118 + u32 wait_mask; 119 + struct completion done; 120 + u32 
addr_cnt; 121 + u32 instr_cnt; 122 + size_t hdr_cnt; 123 + }; 124 + 125 + static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) 126 + { 127 + return readl(a3700_spi->base + offset); 128 + } 129 + 130 + static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data) 131 + { 132 + writel(data, a3700_spi->base + offset); 133 + } 134 + 135 + static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi) 136 + { 137 + u32 val; 138 + 139 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 140 + val &= ~A3700_SPI_AUTO_CS; 141 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 142 + } 143 + 144 + static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs) 145 + { 146 + u32 val; 147 + 148 + val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 149 + val |= (A3700_SPI_EN << cs); 150 + spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val); 151 + } 152 + 153 + static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi, 154 + unsigned int cs) 155 + { 156 + u32 val; 157 + 158 + val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 159 + val &= ~(A3700_SPI_EN << cs); 160 + spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val); 161 + } 162 + 163 + static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, 164 + unsigned int pin_mode) 165 + { 166 + u32 val; 167 + 168 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 169 + val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN); 170 + val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1); 171 + 172 + switch (pin_mode) { 173 + case 1: 174 + break; 175 + case 2: 176 + val |= A3700_SPI_DATA_PIN0; 177 + break; 178 + case 4: 179 + val |= A3700_SPI_DATA_PIN1; 180 + break; 181 + default: 182 + dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); 183 + return -EINVAL; 184 + } 185 + 186 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 187 + 188 + return 0; 189 + } 190 + 191 + static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi) 192 + { 193 + u32 val; 194 + 195 + val 
= spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 196 + val |= A3700_SPI_FIFO_MODE; 197 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 198 + } 199 + 200 + static void a3700_spi_mode_set(struct a3700_spi *a3700_spi, 201 + unsigned int mode_bits) 202 + { 203 + u32 val; 204 + 205 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 206 + 207 + if (mode_bits & SPI_CPOL) 208 + val |= A3700_SPI_CLK_POL; 209 + else 210 + val &= ~A3700_SPI_CLK_POL; 211 + 212 + if (mode_bits & SPI_CPHA) 213 + val |= A3700_SPI_CLK_PHA; 214 + else 215 + val &= ~A3700_SPI_CLK_PHA; 216 + 217 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 218 + } 219 + 220 + static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, 221 + unsigned int speed_hz, u16 mode) 222 + { 223 + u32 val; 224 + u32 prescale; 225 + 226 + prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); 227 + 228 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 229 + val = val & ~A3700_SPI_CLK_PRESCALE_MASK; 230 + 231 + val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK); 232 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 233 + 234 + if (prescale <= 2) { 235 + val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG); 236 + val |= A3700_SPI_CLK_CAPT_EDGE; 237 + spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val); 238 + } 239 + 240 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 241 + val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA); 242 + 243 + if (mode & SPI_CPOL) 244 + val |= A3700_SPI_CLK_POL; 245 + 246 + if (mode & SPI_CPHA) 247 + val |= A3700_SPI_CLK_PHA; 248 + 249 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 250 + } 251 + 252 + static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len) 253 + { 254 + u32 val; 255 + 256 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 257 + if (len == 4) 258 + val |= A3700_SPI_BYTE_LEN; 259 + else 260 + val &= ~A3700_SPI_BYTE_LEN; 261 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 262 + 263 + a3700_spi->byte_len = len; 
264 + } 265 + 266 + static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi) 267 + { 268 + int timeout = A3700_SPI_TIMEOUT; 269 + u32 val; 270 + 271 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 272 + val |= A3700_SPI_FIFO_FLUSH; 273 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 274 + 275 + while (--timeout) { 276 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 277 + if (!(val & A3700_SPI_FIFO_FLUSH)) 278 + return 0; 279 + udelay(1); 280 + } 281 + 282 + return -ETIMEDOUT; 283 + } 284 + 285 + static int a3700_spi_init(struct a3700_spi *a3700_spi) 286 + { 287 + struct spi_master *master = a3700_spi->master; 288 + u32 val; 289 + int i, ret = 0; 290 + 291 + /* Reset SPI unit */ 292 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 293 + val |= A3700_SPI_SRST; 294 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 295 + 296 + udelay(A3700_SPI_TIMEOUT); 297 + 298 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 299 + val &= ~A3700_SPI_SRST; 300 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 301 + 302 + /* Disable AUTO_CS and deactivate all chip-selects */ 303 + a3700_spi_auto_cs_unset(a3700_spi); 304 + for (i = 0; i < master->num_chipselect; i++) 305 + a3700_spi_deactivate_cs(a3700_spi, i); 306 + 307 + /* Enable FIFO mode */ 308 + a3700_spi_fifo_mode_set(a3700_spi); 309 + 310 + /* Set SPI mode */ 311 + a3700_spi_mode_set(a3700_spi, master->mode_bits); 312 + 313 + /* Reset counters */ 314 + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0); 315 + spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0); 316 + 317 + /* Mask the interrupts and clear cause bits */ 318 + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); 319 + spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U); 320 + 321 + return ret; 322 + } 323 + 324 + static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id) 325 + { 326 + struct spi_master *master = dev_id; 327 + struct a3700_spi *a3700_spi; 328 + u32 cause; 329 + 330 + a3700_spi = 
spi_master_get_devdata(master); 331 + 332 + /* Get interrupt causes */ 333 + cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG); 334 + 335 + if (!cause || !(a3700_spi->wait_mask & cause)) 336 + return IRQ_NONE; 337 + 338 + /* mask and acknowledge the SPI interrupts */ 339 + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); 340 + spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause); 341 + 342 + /* Wake up the transfer */ 343 + if (a3700_spi->wait_mask & cause) 344 + complete(&a3700_spi->done); 345 + 346 + return IRQ_HANDLED; 347 + } 348 + 349 + static bool a3700_spi_wait_completion(struct spi_device *spi) 350 + { 351 + struct a3700_spi *a3700_spi; 352 + unsigned int timeout; 353 + unsigned int ctrl_reg; 354 + unsigned long timeout_jiffies; 355 + 356 + a3700_spi = spi_master_get_devdata(spi->master); 357 + 358 + /* SPI interrupt is edge-triggered, which means an interrupt will 359 + * be generated only when detecting a specific status bit changed 360 + * from '0' to '1'. So when we start waiting for a interrupt, we 361 + * need to check status bit in control reg first, if it is already 1, 362 + * then we do not need to wait for interrupt 363 + */ 364 + ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 365 + if (a3700_spi->wait_mask & ctrl_reg) 366 + return true; 367 + 368 + reinit_completion(&a3700_spi->done); 369 + 370 + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 371 + a3700_spi->wait_mask); 372 + 373 + timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT); 374 + timeout = wait_for_completion_timeout(&a3700_spi->done, 375 + timeout_jiffies); 376 + 377 + a3700_spi->wait_mask = 0; 378 + 379 + if (timeout) 380 + return true; 381 + 382 + /* there might be the case that right after we checked the 383 + * status bits in this routine and before start to wait for 384 + * interrupt by wait_for_completion_timeout, the interrupt 385 + * happens, to avoid missing it we need to double check 386 + * status bits in control reg, if it is already 1, then 387 + * 
consider that we have the interrupt successfully and 388 + * return true. 389 + */ 390 + ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 391 + if (a3700_spi->wait_mask & ctrl_reg) 392 + return true; 393 + 394 + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); 395 + 396 + return true; 397 + } 398 + 399 + static bool a3700_spi_transfer_wait(struct spi_device *spi, 400 + unsigned int bit_mask) 401 + { 402 + struct a3700_spi *a3700_spi; 403 + 404 + a3700_spi = spi_master_get_devdata(spi->master); 405 + a3700_spi->wait_mask = bit_mask; 406 + 407 + return a3700_spi_wait_completion(spi); 408 + } 409 + 410 + static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi, 411 + unsigned int bytes) 412 + { 413 + u32 val; 414 + 415 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 416 + val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT); 417 + val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT; 418 + val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT); 419 + val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT; 420 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 421 + } 422 + 423 + static void a3700_spi_transfer_setup(struct spi_device *spi, 424 + struct spi_transfer *xfer) 425 + { 426 + struct a3700_spi *a3700_spi; 427 + unsigned int byte_len; 428 + 429 + a3700_spi = spi_master_get_devdata(spi->master); 430 + 431 + a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode); 432 + 433 + byte_len = xfer->bits_per_word >> 3; 434 + 435 + a3700_spi_fifo_thres_set(a3700_spi, byte_len); 436 + } 437 + 438 + static void a3700_spi_set_cs(struct spi_device *spi, bool enable) 439 + { 440 + struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master); 441 + 442 + if (!enable) 443 + a3700_spi_activate_cs(a3700_spi, spi->chip_select); 444 + else 445 + a3700_spi_deactivate_cs(a3700_spi, spi->chip_select); 446 + } 447 + 448 + static void a3700_spi_header_set(struct a3700_spi *a3700_spi) 449 + { 450 + u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; 
451 + u32 val = 0; 452 + 453 + /* Clear the header registers */ 454 + spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); 455 + spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); 456 + spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); 457 + 458 + /* Set header counters */ 459 + if (a3700_spi->tx_buf) { 460 + if (a3700_spi->buf_len <= a3700_spi->instr_cnt) { 461 + instr_cnt = a3700_spi->buf_len; 462 + } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + 463 + a3700_spi->addr_cnt)) { 464 + instr_cnt = a3700_spi->instr_cnt; 465 + addr_cnt = a3700_spi->buf_len - instr_cnt; 466 + } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { 467 + instr_cnt = a3700_spi->instr_cnt; 468 + addr_cnt = a3700_spi->addr_cnt; 469 + /* Need to handle the normal write case with 1 byte 470 + * data 471 + */ 472 + if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) 473 + dummy_cnt = a3700_spi->buf_len - instr_cnt - 474 + addr_cnt; 475 + } 476 + val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK) 477 + << A3700_SPI_INSTR_CNT_BIT); 478 + val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK) 479 + << A3700_SPI_ADDR_CNT_BIT); 480 + val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK) 481 + << A3700_SPI_DUMMY_CNT_BIT); 482 + } 483 + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); 484 + 485 + /* Update the buffer length to be transferred */ 486 + a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt); 487 + 488 + /* Set Instruction */ 489 + val = 0; 490 + while (instr_cnt--) { 491 + val = (val << 8) | a3700_spi->tx_buf[0]; 492 + a3700_spi->tx_buf++; 493 + } 494 + spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val); 495 + 496 + /* Set Address */ 497 + val = 0; 498 + while (addr_cnt--) { 499 + val = (val << 8) | a3700_spi->tx_buf[0]; 500 + a3700_spi->tx_buf++; 501 + } 502 + spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); 503 + } 504 + 505 + static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) 506 + { 507 + u32 val; 508 + 509 + val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 510 + return (val 
& A3700_SPI_WFIFO_FULL); 511 + } 512 + 513 + static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) 514 + { 515 + u32 val; 516 + int i = 0; 517 + 518 + while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { 519 + val = 0; 520 + if (a3700_spi->buf_len >= 4) { 521 + val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); 522 + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); 523 + 524 + a3700_spi->buf_len -= 4; 525 + a3700_spi->tx_buf += 4; 526 + } else { 527 + /* 528 + * If the remained buffer length is less than 4-bytes, 529 + * we should pad the write buffer with all ones. So that 530 + * it avoids overwrite the unexpected bytes following 531 + * the last one. 532 + */ 533 + val = GENMASK(31, 0); 534 + while (a3700_spi->buf_len) { 535 + val &= ~(0xff << (8 * i)); 536 + val |= *a3700_spi->tx_buf++ << (8 * i); 537 + i++; 538 + a3700_spi->buf_len--; 539 + 540 + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 541 + val); 542 + } 543 + break; 544 + } 545 + } 546 + 547 + return 0; 548 + } 549 + 550 + static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi) 551 + { 552 + u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); 553 + 554 + return (val & A3700_SPI_RFIFO_EMPTY); 555 + } 556 + 557 + static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi) 558 + { 559 + u32 val; 560 + 561 + while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) { 562 + val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG); 563 + if (a3700_spi->buf_len >= 4) { 564 + u32 data = le32_to_cpu(val); 565 + memcpy(a3700_spi->rx_buf, &data, 4); 566 + 567 + a3700_spi->buf_len -= 4; 568 + a3700_spi->rx_buf += 4; 569 + } else { 570 + /* 571 + * When remain bytes is not larger than 4, we should 572 + * avoid memory overwriting and just write the left rx 573 + * buffer bytes. 
574 + */ 575 + while (a3700_spi->buf_len) { 576 + *a3700_spi->rx_buf = val & 0xff; 577 + val >>= 8; 578 + 579 + a3700_spi->buf_len--; 580 + a3700_spi->rx_buf++; 581 + } 582 + } 583 + } 584 + 585 + return 0; 586 + } 587 + 588 + static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi) 589 + { 590 + int timeout = A3700_SPI_TIMEOUT; 591 + u32 val; 592 + 593 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 594 + val |= A3700_SPI_XFER_STOP; 595 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 596 + 597 + while (--timeout) { 598 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 599 + if (!(val & A3700_SPI_XFER_START)) 600 + break; 601 + udelay(1); 602 + } 603 + 604 + a3700_spi_fifo_flush(a3700_spi); 605 + 606 + val &= ~A3700_SPI_XFER_STOP; 607 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 608 + } 609 + 610 + static int a3700_spi_prepare_message(struct spi_master *master, 611 + struct spi_message *message) 612 + { 613 + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); 614 + struct spi_device *spi = message->spi; 615 + int ret; 616 + 617 + ret = clk_enable(a3700_spi->clk); 618 + if (ret) { 619 + dev_err(&spi->dev, "failed to enable clk with error %d\n", ret); 620 + return ret; 621 + } 622 + 623 + /* Flush the FIFOs */ 624 + ret = a3700_spi_fifo_flush(a3700_spi); 625 + if (ret) 626 + return ret; 627 + 628 + a3700_spi_bytelen_set(a3700_spi, 4); 629 + 630 + return 0; 631 + } 632 + 633 + static int a3700_spi_transfer_one(struct spi_master *master, 634 + struct spi_device *spi, 635 + struct spi_transfer *xfer) 636 + { 637 + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); 638 + int ret = 0, timeout = A3700_SPI_TIMEOUT; 639 + unsigned int nbits = 0; 640 + u32 val; 641 + 642 + a3700_spi_transfer_setup(spi, xfer); 643 + 644 + a3700_spi->tx_buf = xfer->tx_buf; 645 + a3700_spi->rx_buf = xfer->rx_buf; 646 + a3700_spi->buf_len = xfer->len; 647 + 648 + /* SPI transfer headers */ 649 + a3700_spi_header_set(a3700_spi); 650 + 
651 + if (xfer->tx_buf) 652 + nbits = xfer->tx_nbits; 653 + else if (xfer->rx_buf) 654 + nbits = xfer->rx_nbits; 655 + 656 + a3700_spi_pin_mode_set(a3700_spi, nbits); 657 + 658 + if (xfer->rx_buf) { 659 + /* Set read data length */ 660 + spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 661 + a3700_spi->buf_len); 662 + /* Start READ transfer */ 663 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 664 + val &= ~A3700_SPI_RW_EN; 665 + val |= A3700_SPI_XFER_START; 666 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 667 + } else if (xfer->tx_buf) { 668 + /* Start Write transfer */ 669 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 670 + val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN); 671 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 672 + 673 + /* 674 + * If there are data to be written to the SPI device, xmit_data 675 + * flag is set true; otherwise the instruction in SPI_INSTR does 676 + * not require data to be written to the SPI device, then 677 + * xmit_data flag is set false. 
678 + */ 679 + a3700_spi->xmit_data = (a3700_spi->buf_len != 0); 680 + } 681 + 682 + while (a3700_spi->buf_len) { 683 + if (a3700_spi->tx_buf) { 684 + /* Wait wfifo ready */ 685 + if (!a3700_spi_transfer_wait(spi, 686 + A3700_SPI_WFIFO_RDY)) { 687 + dev_err(&spi->dev, 688 + "wait wfifo ready timed out\n"); 689 + ret = -ETIMEDOUT; 690 + goto error; 691 + } 692 + /* Fill up the wfifo */ 693 + ret = a3700_spi_fifo_write(a3700_spi); 694 + if (ret) 695 + goto error; 696 + } else if (a3700_spi->rx_buf) { 697 + /* Wait rfifo ready */ 698 + if (!a3700_spi_transfer_wait(spi, 699 + A3700_SPI_RFIFO_RDY)) { 700 + dev_err(&spi->dev, 701 + "wait rfifo ready timed out\n"); 702 + ret = -ETIMEDOUT; 703 + goto error; 704 + } 705 + /* Drain out the rfifo */ 706 + ret = a3700_spi_fifo_read(a3700_spi); 707 + if (ret) 708 + goto error; 709 + } 710 + } 711 + 712 + /* 713 + * Stop a write transfer in fifo mode: 714 + * - wait all the bytes in wfifo to be shifted out 715 + * - set XFER_STOP bit 716 + * - wait XFER_START bit clear 717 + * - clear XFER_STOP bit 718 + * Stop a read transfer in fifo mode: 719 + * - the hardware is to reset the XFER_START bit 720 + * after the number of bytes indicated in DIN_CNT 721 + * register 722 + * - just wait XFER_START bit clear 723 + */ 724 + if (a3700_spi->tx_buf) { 725 + if (a3700_spi->xmit_data) { 726 + /* 727 + * If there are data written to the SPI device, wait 728 + * until SPI_WFIFO_EMPTY is 1 to wait for all data to 729 + * transfer out of write FIFO. 730 + */ 731 + if (!a3700_spi_transfer_wait(spi, 732 + A3700_SPI_WFIFO_EMPTY)) { 733 + dev_err(&spi->dev, "wait wfifo empty timed out\n"); 734 + return -ETIMEDOUT; 735 + } 736 + } else { 737 + /* 738 + * If the instruction in SPI_INSTR does not require data 739 + * to be written to the SPI device, wait until SPI_RDY 740 + * is 1 for the SPI interface to be in idle. 
741 + */ 742 + if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { 743 + dev_err(&spi->dev, "wait xfer ready timed out\n"); 744 + return -ETIMEDOUT; 745 + } 746 + } 747 + 748 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 749 + val |= A3700_SPI_XFER_STOP; 750 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 751 + } 752 + 753 + while (--timeout) { 754 + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 755 + if (!(val & A3700_SPI_XFER_START)) 756 + break; 757 + udelay(1); 758 + } 759 + 760 + if (timeout == 0) { 761 + dev_err(&spi->dev, "wait transfer start clear timed out\n"); 762 + ret = -ETIMEDOUT; 763 + goto error; 764 + } 765 + 766 + val &= ~A3700_SPI_XFER_STOP; 767 + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); 768 + goto out; 769 + 770 + error: 771 + a3700_spi_transfer_abort_fifo(a3700_spi); 772 + out: 773 + spi_finalize_current_transfer(master); 774 + 775 + return ret; 776 + } 777 + 778 + static int a3700_spi_unprepare_message(struct spi_master *master, 779 + struct spi_message *message) 780 + { 781 + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); 782 + 783 + clk_disable(a3700_spi->clk); 784 + 785 + return 0; 786 + } 787 + 788 + static const struct of_device_id a3700_spi_dt_ids[] = { 789 + { .compatible = "marvell,armada-3700-spi", .data = NULL }, 790 + {}, 791 + }; 792 + 793 + MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids); 794 + 795 + static int a3700_spi_probe(struct platform_device *pdev) 796 + { 797 + struct device *dev = &pdev->dev; 798 + struct device_node *of_node = dev->of_node; 799 + struct resource *res; 800 + struct spi_master *master; 801 + struct a3700_spi *spi; 802 + u32 num_cs = 0; 803 + int ret = 0; 804 + 805 + master = spi_alloc_master(dev, sizeof(*spi)); 806 + if (!master) { 807 + dev_err(dev, "master allocation failed\n"); 808 + ret = -ENOMEM; 809 + goto out; 810 + } 811 + 812 + if (of_property_read_u32(of_node, "num-cs", &num_cs)) { 813 + dev_err(dev, "could not find num-cs\n"); 814 + ret = -ENXIO; 815 
+ goto error; 816 + } 817 + 818 + master->bus_num = pdev->id; 819 + master->dev.of_node = of_node; 820 + master->mode_bits = SPI_MODE_3; 821 + master->num_chipselect = num_cs; 822 + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32); 823 + master->prepare_message = a3700_spi_prepare_message; 824 + master->transfer_one = a3700_spi_transfer_one; 825 + master->unprepare_message = a3700_spi_unprepare_message; 826 + master->set_cs = a3700_spi_set_cs; 827 + master->flags = SPI_MASTER_HALF_DUPLEX; 828 + master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | 829 + SPI_RX_QUAD | SPI_TX_QUAD); 830 + 831 + platform_set_drvdata(pdev, master); 832 + 833 + spi = spi_master_get_devdata(master); 834 + memset(spi, 0, sizeof(struct a3700_spi)); 835 + 836 + spi->master = master; 837 + spi->instr_cnt = A3700_INSTR_CNT; 838 + spi->addr_cnt = A3700_ADDR_CNT; 839 + spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT + 840 + A3700_DUMMY_CNT; 841 + 842 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 843 + spi->base = devm_ioremap_resource(dev, res); 844 + if (IS_ERR(spi->base)) { 845 + ret = PTR_ERR(spi->base); 846 + goto error; 847 + } 848 + 849 + spi->irq = platform_get_irq(pdev, 0); 850 + if (spi->irq < 0) { 851 + dev_err(dev, "could not get irq: %d\n", spi->irq); 852 + ret = -ENXIO; 853 + goto error; 854 + } 855 + 856 + init_completion(&spi->done); 857 + 858 + spi->clk = devm_clk_get(dev, NULL); 859 + if (IS_ERR(spi->clk)) { 860 + dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk)); 861 + goto error; 862 + } 863 + 864 + ret = clk_prepare(spi->clk); 865 + if (ret) { 866 + dev_err(dev, "could not prepare clk: %d\n", ret); 867 + goto error; 868 + } 869 + 870 + ret = a3700_spi_init(spi); 871 + if (ret) 872 + goto error_clk; 873 + 874 + ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0, 875 + dev_name(dev), master); 876 + if (ret) { 877 + dev_err(dev, "could not request IRQ: %d\n", ret); 878 + goto error_clk; 879 + } 880 + 881 + ret = 
devm_spi_register_master(dev, master); 882 + if (ret) { 883 + dev_err(dev, "Failed to register master\n"); 884 + goto error_clk; 885 + } 886 + 887 + return 0; 888 + 889 + error_clk: 890 + clk_disable_unprepare(spi->clk); 891 + error: 892 + spi_master_put(master); 893 + out: 894 + return ret; 895 + } 896 + 897 + static int a3700_spi_remove(struct platform_device *pdev) 898 + { 899 + struct spi_master *master = platform_get_drvdata(pdev); 900 + struct a3700_spi *spi = spi_master_get_devdata(master); 901 + 902 + clk_unprepare(spi->clk); 903 + spi_master_put(master); 904 + 905 + return 0; 906 + } 907 + 908 + static struct platform_driver a3700_spi_driver = { 909 + .driver = { 910 + .name = DRIVER_NAME, 911 + .owner = THIS_MODULE, 912 + .of_match_table = of_match_ptr(a3700_spi_dt_ids), 913 + }, 914 + .probe = a3700_spi_probe, 915 + .remove = a3700_spi_remove, 916 + }; 917 + 918 + module_platform_driver(a3700_spi_driver); 919 + 920 + MODULE_DESCRIPTION("Armada-3700 SPI driver"); 921 + MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>"); 922 + MODULE_LICENSE("GPL"); 923 + MODULE_ALIAS("platform:" DRIVER_NAME);
+1
drivers/spi/spi-ath79.c
··· 304 304 { .compatible = "qca,ar7100-spi", }, 305 305 { }, 306 306 }; 307 + MODULE_DEVICE_TABLE(of, ath79_spi_of_match); 307 308 308 309 static struct platform_driver ath79_spi_driver = { 309 310 .probe = ath79_spi_probe,
+106 -178
drivers/spi/spi-atmel.c
··· 265 265 266 266 #define AUTOSUSPEND_TIMEOUT 2000 267 267 268 - struct atmel_spi_dma { 269 - struct dma_chan *chan_rx; 270 - struct dma_chan *chan_tx; 271 - struct scatterlist sgrx; 272 - struct scatterlist sgtx; 273 - struct dma_async_tx_descriptor *data_desc_rx; 274 - struct dma_async_tx_descriptor *data_desc_tx; 275 - 276 - struct at_dma_slave dma_slave; 277 - }; 278 - 279 268 struct atmel_spi_caps { 280 269 bool is_spi2; 281 270 bool has_wdrbt; ··· 293 304 294 305 struct completion xfer_completion; 295 306 296 - /* scratch buffer */ 297 - void *buffer; 298 - dma_addr_t buffer_dma; 299 - 300 307 struct atmel_spi_caps caps; 301 308 302 309 bool use_dma; 303 310 bool use_pdc; 304 311 bool use_cs_gpios; 305 - /* dmaengine data */ 306 - struct atmel_spi_dma dma; 307 312 308 313 bool keep_cs; 309 314 bool cs_active; ··· 311 328 u32 csr; 312 329 }; 313 330 314 - #define BUFFER_SIZE PAGE_SIZE 331 + #define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */ 315 332 #define INVALID_DMA_ADDRESS 0xffffffff 316 333 317 334 /* ··· 441 458 return as->use_dma && xfer->len >= DMA_MIN_BYTES; 442 459 } 443 460 461 + static bool atmel_spi_can_dma(struct spi_master *master, 462 + struct spi_device *spi, 463 + struct spi_transfer *xfer) 464 + { 465 + struct atmel_spi *as = spi_master_get_devdata(master); 466 + 467 + return atmel_spi_use_dma(as, xfer); 468 + } 469 + 444 470 static int atmel_spi_dma_slave_config(struct atmel_spi *as, 445 471 struct dma_slave_config *slave_config, 446 472 u8 bits_per_word) 447 473 { 474 + struct spi_master *master = platform_get_drvdata(as->pdev); 448 475 int err = 0; 449 476 450 477 if (bits_per_word > 8) { ··· 486 493 * path works the same whether FIFOs are available (and enabled) or not. 
487 494 */ 488 495 slave_config->direction = DMA_MEM_TO_DEV; 489 - if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) { 496 + if (dmaengine_slave_config(master->dma_tx, slave_config)) { 490 497 dev_err(&as->pdev->dev, 491 498 "failed to configure tx dma channel\n"); 492 499 err = -EINVAL; ··· 501 508 * enabled) or not. 502 509 */ 503 510 slave_config->direction = DMA_DEV_TO_MEM; 504 - if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) { 511 + if (dmaengine_slave_config(master->dma_rx, slave_config)) { 505 512 dev_err(&as->pdev->dev, 506 513 "failed to configure rx dma channel\n"); 507 514 err = -EINVAL; ··· 510 517 return err; 511 518 } 512 519 513 - static int atmel_spi_configure_dma(struct atmel_spi *as) 520 + static int atmel_spi_configure_dma(struct spi_master *master, 521 + struct atmel_spi *as) 514 522 { 515 523 struct dma_slave_config slave_config; 516 524 struct device *dev = &as->pdev->dev; ··· 521 527 dma_cap_zero(mask); 522 528 dma_cap_set(DMA_SLAVE, mask); 523 529 524 - as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx"); 525 - if (IS_ERR(as->dma.chan_tx)) { 526 - err = PTR_ERR(as->dma.chan_tx); 530 + master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); 531 + if (IS_ERR(master->dma_tx)) { 532 + err = PTR_ERR(master->dma_tx); 527 533 if (err == -EPROBE_DEFER) { 528 534 dev_warn(dev, "no DMA channel available at the moment\n"); 529 - return err; 535 + goto error_clear; 530 536 } 531 537 dev_err(dev, 532 538 "DMA TX channel not available, SPI unable to use DMA\n"); 533 539 err = -EBUSY; 534 - goto error; 540 + goto error_clear; 535 541 } 536 542 537 543 /* 538 544 * No reason to check EPROBE_DEFER here since we have already requested 539 545 * tx channel. If it fails here, it's for another reason. 
540 546 */ 541 - as->dma.chan_rx = dma_request_slave_channel(dev, "rx"); 547 + master->dma_rx = dma_request_slave_channel(dev, "rx"); 542 548 543 - if (!as->dma.chan_rx) { 549 + if (!master->dma_rx) { 544 550 dev_err(dev, 545 551 "DMA RX channel not available, SPI unable to use DMA\n"); 546 552 err = -EBUSY; ··· 553 559 554 560 dev_info(&as->pdev->dev, 555 561 "Using %s (tx) and %s (rx) for DMA transfers\n", 556 - dma_chan_name(as->dma.chan_tx), 557 - dma_chan_name(as->dma.chan_rx)); 562 + dma_chan_name(master->dma_tx), 563 + dma_chan_name(master->dma_rx)); 564 + 558 565 return 0; 559 566 error: 560 - if (as->dma.chan_rx) 561 - dma_release_channel(as->dma.chan_rx); 562 - if (!IS_ERR(as->dma.chan_tx)) 563 - dma_release_channel(as->dma.chan_tx); 567 + if (master->dma_rx) 568 + dma_release_channel(master->dma_rx); 569 + if (!IS_ERR(master->dma_tx)) 570 + dma_release_channel(master->dma_tx); 571 + error_clear: 572 + master->dma_tx = master->dma_rx = NULL; 564 573 return err; 565 574 } 566 575 567 - static void atmel_spi_stop_dma(struct atmel_spi *as) 576 + static void atmel_spi_stop_dma(struct spi_master *master) 568 577 { 569 - if (as->dma.chan_rx) 570 - dmaengine_terminate_all(as->dma.chan_rx); 571 - if (as->dma.chan_tx) 572 - dmaengine_terminate_all(as->dma.chan_tx); 578 + if (master->dma_rx) 579 + dmaengine_terminate_all(master->dma_rx); 580 + if (master->dma_tx) 581 + dmaengine_terminate_all(master->dma_tx); 573 582 } 574 583 575 - static void atmel_spi_release_dma(struct atmel_spi *as) 584 + static void atmel_spi_release_dma(struct spi_master *master) 576 585 { 577 - if (as->dma.chan_rx) 578 - dma_release_channel(as->dma.chan_rx); 579 - if (as->dma.chan_tx) 580 - dma_release_channel(as->dma.chan_tx); 586 + if (master->dma_rx) { 587 + dma_release_channel(master->dma_rx); 588 + master->dma_rx = NULL; 589 + } 590 + if (master->dma_tx) { 591 + dma_release_channel(master->dma_tx); 592 + master->dma_tx = NULL; 593 + } 581 594 } 582 595 583 596 /* This function is 
called by the DMA driver from tasklet context */ ··· 614 613 cpu_relax(); 615 614 } 616 615 617 - if (xfer->tx_buf) { 618 - if (xfer->bits_per_word > 8) 619 - spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos)); 620 - else 621 - spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos)); 622 - } else { 623 - spi_writel(as, TDR, 0); 624 - } 616 + if (xfer->bits_per_word > 8) 617 + spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos)); 618 + else 619 + spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos)); 625 620 626 621 dev_dbg(master->dev.parent, 627 622 " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", ··· 664 667 665 668 /* Fill TX FIFO */ 666 669 while (num_data >= 2) { 667 - if (xfer->tx_buf) { 668 - if (xfer->bits_per_word > 8) { 669 - td0 = *words++; 670 - td1 = *words++; 671 - } else { 672 - td0 = *bytes++; 673 - td1 = *bytes++; 674 - } 670 + if (xfer->bits_per_word > 8) { 671 + td0 = *words++; 672 + td1 = *words++; 675 673 } else { 676 - td0 = 0; 677 - td1 = 0; 674 + td0 = *bytes++; 675 + td1 = *bytes++; 678 676 } 679 677 680 678 spi_writel(as, TDR, (td1 << 16) | td0); ··· 677 685 } 678 686 679 687 if (num_data) { 680 - if (xfer->tx_buf) { 681 - if (xfer->bits_per_word > 8) 682 - td0 = *words++; 683 - else 684 - td0 = *bytes++; 685 - } else { 686 - td0 = 0; 687 - } 688 + if (xfer->bits_per_word > 8) 689 + td0 = *words++; 690 + else 691 + td0 = *bytes++; 688 692 689 693 spi_writew(as, TDR, td0); 690 694 num_data--; ··· 720 732 u32 *plen) 721 733 { 722 734 struct atmel_spi *as = spi_master_get_devdata(master); 723 - struct dma_chan *rxchan = as->dma.chan_rx; 724 - struct dma_chan *txchan = as->dma.chan_tx; 735 + struct dma_chan *rxchan = master->dma_rx; 736 + struct dma_chan *txchan = master->dma_tx; 725 737 struct dma_async_tx_descriptor *rxdesc; 726 738 struct dma_async_tx_descriptor *txdesc; 727 739 struct dma_slave_config slave_config; 728 740 dma_cookie_t cookie; 729 - u32 len = *plen; 730 741 731 742 dev_vdbg(master->dev.parent, 
"atmel_spi_next_xfer_dma_submit\n"); 732 743 ··· 736 749 /* release lock for DMA operations */ 737 750 atmel_spi_unlock(as); 738 751 739 - /* prepare the RX dma transfer */ 740 - sg_init_table(&as->dma.sgrx, 1); 741 - if (xfer->rx_buf) { 742 - as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen; 743 - } else { 744 - as->dma.sgrx.dma_address = as->buffer_dma; 745 - if (len > BUFFER_SIZE) 746 - len = BUFFER_SIZE; 747 - } 748 - 749 - /* prepare the TX dma transfer */ 750 - sg_init_table(&as->dma.sgtx, 1); 751 - if (xfer->tx_buf) { 752 - as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen; 753 - } else { 754 - as->dma.sgtx.dma_address = as->buffer_dma; 755 - if (len > BUFFER_SIZE) 756 - len = BUFFER_SIZE; 757 - memset(as->buffer, 0, len); 758 - } 759 - 760 - sg_dma_len(&as->dma.sgtx) = len; 761 - sg_dma_len(&as->dma.sgrx) = len; 762 - 763 - *plen = len; 752 + *plen = xfer->len; 764 753 765 754 if (atmel_spi_dma_slave_config(as, &slave_config, 766 755 xfer->bits_per_word)) 767 756 goto err_exit; 768 757 769 758 /* Send both scatterlists */ 770 - rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1, 759 + rxdesc = dmaengine_prep_slave_sg(rxchan, 760 + xfer->rx_sg.sgl, xfer->rx_sg.nents, 771 761 DMA_FROM_DEVICE, 772 762 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 773 763 if (!rxdesc) 774 764 goto err_dma; 775 765 776 - txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1, 766 + txdesc = dmaengine_prep_slave_sg(txchan, 767 + xfer->tx_sg.sgl, xfer->tx_sg.nents, 777 768 DMA_TO_DEVICE, 778 769 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 779 770 if (!txdesc) ··· 785 820 786 821 err_dma: 787 822 spi_writel(as, IDR, SPI_BIT(OVRES)); 788 - atmel_spi_stop_dma(as); 823 + atmel_spi_stop_dma(master); 789 824 err_exit: 790 825 atmel_spi_lock(as); 791 826 return -ENOMEM; ··· 797 832 dma_addr_t *rx_dma, 798 833 u32 *plen) 799 834 { 800 - struct atmel_spi *as = spi_master_get_devdata(master); 801 - u32 len = *plen; 802 - 803 - /* use scratch buffer only when rx or tx data is 
unspecified */ 804 - if (xfer->rx_buf) 805 - *rx_dma = xfer->rx_dma + xfer->len - *plen; 806 - else { 807 - *rx_dma = as->buffer_dma; 808 - if (len > BUFFER_SIZE) 809 - len = BUFFER_SIZE; 810 - } 811 - 812 - if (xfer->tx_buf) 813 - *tx_dma = xfer->tx_dma + xfer->len - *plen; 814 - else { 815 - *tx_dma = as->buffer_dma; 816 - if (len > BUFFER_SIZE) 817 - len = BUFFER_SIZE; 818 - memset(as->buffer, 0, len); 819 - dma_sync_single_for_device(&as->pdev->dev, 820 - as->buffer_dma, len, DMA_TO_DEVICE); 821 - } 822 - 823 - *plen = len; 835 + *rx_dma = xfer->rx_dma + xfer->len - *plen; 836 + *tx_dma = xfer->tx_dma + xfer->len - *plen; 837 + if (*plen > master->max_dma_len) 838 + *plen = master->max_dma_len; 824 839 } 825 840 826 841 static int atmel_spi_set_xfer_speed(struct atmel_spi *as, ··· 972 1027 u16 *rxp16; 973 1028 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 974 1029 975 - if (xfer->rx_buf) { 976 - if (xfer->bits_per_word > 8) { 977 - rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); 978 - *rxp16 = spi_readl(as, RDR); 979 - } else { 980 - rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 981 - *rxp = spi_readl(as, RDR); 982 - } 1030 + if (xfer->bits_per_word > 8) { 1031 + rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); 1032 + *rxp16 = spi_readl(as, RDR); 983 1033 } else { 984 - spi_readl(as, RDR); 1034 + rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 1035 + *rxp = spi_readl(as, RDR); 985 1036 } 986 1037 if (xfer->bits_per_word > 8) { 987 1038 if (as->current_remaining_bytes > 2) ··· 1016 1075 /* Read data */ 1017 1076 while (num_data) { 1018 1077 rd = spi_readl(as, RDR); 1019 - if (xfer->rx_buf) { 1020 - if (xfer->bits_per_word > 8) 1021 - *words++ = rd; 1022 - else 1023 - *bytes++ = rd; 1024 - } 1078 + if (xfer->bits_per_word > 8) 1079 + *words++ = rd; 1080 + else 1081 + *bytes++ = rd; 1025 1082 num_data--; 1026 1083 } 1027 1084 } ··· 1240 1301 * better fault reporting. 
1241 1302 */ 1242 1303 if ((!msg->is_dma_mapped) 1243 - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) { 1304 + && as->use_pdc) { 1244 1305 if (atmel_spi_dma_map_xfer(as, xfer) < 0) 1245 1306 return -ENOMEM; 1246 1307 } ··· 1313 1374 spi_readl(as, SR); 1314 1375 1315 1376 } else if (atmel_spi_use_dma(as, xfer)) { 1316 - atmel_spi_stop_dma(as); 1377 + atmel_spi_stop_dma(master); 1317 1378 } 1318 1379 1319 1380 if (!msg->is_dma_mapped 1320 - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) 1381 + && as->use_pdc) 1321 1382 atmel_spi_dma_unmap_xfer(master, xfer); 1322 1383 1323 1384 return 0; ··· 1328 1389 } 1329 1390 1330 1391 if (!msg->is_dma_mapped 1331 - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) 1392 + && as->use_pdc) 1332 1393 atmel_spi_dma_unmap_xfer(master, xfer); 1333 1394 1334 1395 if (xfer->delay_usecs) ··· 1450 1511 int cs_gpio = of_get_named_gpio(pdev->dev.of_node, 1451 1512 "cs-gpios", i); 1452 1513 1453 - if (cs_gpio == -EPROBE_DEFER) 1454 - return cs_gpio; 1514 + if (cs_gpio == -EPROBE_DEFER) 1515 + return cs_gpio; 1455 1516 1456 - if (gpio_is_valid(cs_gpio)) { 1457 - ret = devm_gpio_request(&pdev->dev, cs_gpio, 1458 - dev_name(&pdev->dev)); 1459 - if (ret) 1460 - return ret; 1461 - } 1517 + if (gpio_is_valid(cs_gpio)) { 1518 + ret = devm_gpio_request(&pdev->dev, cs_gpio, 1519 + dev_name(&pdev->dev)); 1520 + if (ret) 1521 + return ret; 1522 + } 1462 1523 } 1463 1524 1464 1525 return 0; ··· 1501 1562 master->bus_num = pdev->id; 1502 1563 master->num_chipselect = master->dev.of_node ? 
0 : 4; 1503 1564 master->setup = atmel_spi_setup; 1565 + master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX); 1504 1566 master->transfer_one_message = atmel_spi_transfer_one_message; 1505 1567 master->cleanup = atmel_spi_cleanup; 1506 1568 master->auto_runtime_pm = true; 1569 + master->max_dma_len = SPI_MAX_DMA_XFER; 1570 + master->can_dma = atmel_spi_can_dma; 1507 1571 platform_set_drvdata(pdev, master); 1508 1572 1509 1573 as = spi_master_get_devdata(master); 1510 - 1511 - /* 1512 - * Scratch buffer is used for throwaway rx and tx data. 1513 - * It's coherent to minimize dcache pollution. 1514 - */ 1515 - as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, 1516 - &as->buffer_dma, GFP_KERNEL); 1517 - if (!as->buffer) 1518 - goto out_free; 1519 1574 1520 1575 spin_lock_init(&as->lock); 1521 1576 ··· 1517 1584 as->regs = devm_ioremap_resource(&pdev->dev, regs); 1518 1585 if (IS_ERR(as->regs)) { 1519 1586 ret = PTR_ERR(as->regs); 1520 - goto out_free_buffer; 1587 + goto out_unmap_regs; 1521 1588 } 1522 1589 as->phybase = regs->start; 1523 1590 as->irq = irq; ··· 1542 1609 as->use_dma = false; 1543 1610 as->use_pdc = false; 1544 1611 if (as->caps.has_dma_support) { 1545 - ret = atmel_spi_configure_dma(as); 1546 - if (ret == 0) 1612 + ret = atmel_spi_configure_dma(master, as); 1613 + if (ret == 0) { 1547 1614 as->use_dma = true; 1548 - else if (ret == -EPROBE_DEFER) 1615 + } else if (ret == -EPROBE_DEFER) { 1549 1616 return ret; 1617 + } 1550 1618 } else { 1551 1619 as->use_pdc = true; 1552 1620 } ··· 1592 1658 spi_writel(as, CR, SPI_BIT(FIFOEN)); 1593 1659 } 1594 1660 1595 - /* go! */ 1596 - dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1597 - (unsigned long)regs->start, irq); 1598 - 1599 1661 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT); 1600 1662 pm_runtime_use_autosuspend(&pdev->dev); 1601 1663 pm_runtime_set_active(&pdev->dev); ··· 1601 1671 if (ret) 1602 1672 goto out_free_dma; 1603 1673 1674 + /* go! 
*/ 1675 + dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1676 + (unsigned long)regs->start, irq); 1677 + 1604 1678 return 0; 1605 1679 1606 1680 out_free_dma: ··· 1612 1678 pm_runtime_set_suspended(&pdev->dev); 1613 1679 1614 1680 if (as->use_dma) 1615 - atmel_spi_release_dma(as); 1681 + atmel_spi_release_dma(master); 1616 1682 1617 1683 spi_writel(as, CR, SPI_BIT(SWRST)); 1618 1684 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1619 1685 clk_disable_unprepare(clk); 1620 1686 out_free_irq: 1621 1687 out_unmap_regs: 1622 - out_free_buffer: 1623 - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, 1624 - as->buffer_dma); 1625 1688 out_free: 1626 1689 spi_master_put(master); 1627 1690 return ret; ··· 1634 1703 /* reset the hardware and block queue progress */ 1635 1704 spin_lock_irq(&as->lock); 1636 1705 if (as->use_dma) { 1637 - atmel_spi_stop_dma(as); 1638 - atmel_spi_release_dma(as); 1706 + atmel_spi_stop_dma(master); 1707 + atmel_spi_release_dma(master); 1639 1708 } 1640 1709 1641 1710 spi_writel(as, CR, SPI_BIT(SWRST)); 1642 1711 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1643 1712 spi_readl(as, SR); 1644 1713 spin_unlock_irq(&as->lock); 1645 - 1646 - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, 1647 - as->buffer_dma); 1648 1714 1649 1715 clk_disable_unprepare(as->clk); 1650 1716
+1
drivers/spi/spi-axi-spi-engine.c
··· 574 574 { .compatible = "adi,axi-spi-engine-1.00.a" }, 575 575 { }, 576 576 }; 577 + MODULE_DEVICE_TABLE(of, spi_engine_match_table); 577 578 578 579 static struct platform_driver spi_engine_driver = { 579 580 .probe = spi_engine_probe,