// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "internals.h"
#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct dw_spi_chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static void dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->ctlr->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline void dw_spi_debugfs_init(struct dw_spi *dws)
{
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * DW SPI controller demands any native CS being set in order to
	 * proceed with data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0)));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, "SPI_DW_CORE");

/* Return the max entries we can fill into tx fifo */
static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but that
	 * doesn't cover the data which is out of the tx/rx FIFOs and still
	 * inside the shift registers. So the limit is enforced from the
	 * software point of view instead.
	 */
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

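/* Fill the Tx FIFO with up to dw_spi_tx_max() data words from the Tx buffer */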
static void dw_writer(struct dw_spi *dws)
{
	u32 max = dw_spi_tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

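/* Drain up to dw_spi_rx_max() data words from the Rx FIFO into the Rx buffer */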
static void dw_reader(struct dw_spi *dws)
{
	u32 max = dw_spi_rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}

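/*
 * Check the interrupt status for Rx/Tx FIFO errors. On any error the
 * controller is reset and the current message (if any) is marked as failed.
 * The raw (RISR) or masked (ISR) status is read depending on 'raw'.
 */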
int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & DW_SPI_INT_RXOI) {
		dev_err(&dws->ctlr->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_RXUI) {
		dev_err(&dws->ctlr->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_TXOI) {
		dev_err(&dws->ctlr->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		dw_spi_reset_chip(dws);
		if (dws->ctlr->cur_msg)
			dws->ctlr->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, "SPI_DW_CORE");

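/* IRQ-based transfer handler: drain the Rx FIFO, refill the Tx FIFO and finalize */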
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->ctlr);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time this handler gets a chance
	 * to run. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's
	 * the final stage of the transfer. By doing so we'll get the next
	 * IRQ right when the leftover incoming data is received.
	 */
	dw_reader(dws);
	if (!dws->rx_len) {
		dw_spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->ctlr);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will be
	 * disabled after the data transmission is finished so as not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & DW_SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

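/* Common IRQ entry point: ignore spurious events and dispatch to the transfer handler */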
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(ctlr);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;

	if (!irq_status)
		return IRQ_NONE;

	if (!ctlr->cur_msg) {
		dw_spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

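/*
 * Compose the static part of the CTRLR0 value (frame format, SPI mode and
 * loopback flags) for the given SPI device. The field layout differs
 * between the DW APB SSI (PSSI) and DWC SSI (HSSI) IP cores.
 */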
static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (dw_spi_ip_is(dws, PSSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_PSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_PSSI_CTRLR0_SCPHA;

		/* CTRLR0[11] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_PSSI_CTRLR0_SRL;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_HSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_HSSI_CTRLR0_SCPHA;

		/* CTRLR0[13] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_HSSI_CTRLR0_SRL;

		/* CTRLR0[31] MST */
		if (dw_spi_ver_is_ge(dws, HSSI, 102A))
			cr0 |= DW_HSSI_CTRLR0_MST;
	}

	return cr0;
}

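/*
 * Apply the per-transfer configuration: data frame size, transfer mode,
 * clock divider and Rx sample delay. The clock and delay registers are
 * only touched when the requested values actually change.
 */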
void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (dw_spi_ip_is(dws, PSSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (spi_controller_is_target(dws->ctlr))
		return;

	if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
	    cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note DW APB SSI clock divider doesn't support odd numbers */
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		dw_spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, "SPI_DW_CORE");

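/* Set up the FIFO thresholds and unmask the interrupts for an IRQ-based transfer */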
static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
	 * level will be adjusted at the final stage of the IRQ-based SPI
	 * transfer execution so as not to lose the leftover of the incoming
	 * data.
	 */
	level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
		DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
	dw_spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the pending data is ready to be
 * read, read it from the Rx FIFO and check whether the performed procedure
 * has been successful.
 *
 * Note that this method, just like the IRQ-based transfer, won't work well
 * for SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}

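/*
 * Standard transfer_one() callback: configure the controller for the
 * transfer and then run it in DMA, polling or IRQ mode depending on the
 * controller capabilities and the IRQ availability.
 */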
static int dw_spi_transfer_one(struct spi_controller *ctlr,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(ctlr);
	struct dw_spi_cfg cfg = {
		.tmode = DW_SPI_CTRLR0_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes = spi_bpw_to_bytes(transfer->bits_per_word);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_mb();

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = spi_xfer_is_dma_mapped(ctlr, spi, transfer);

	/* For poll mode just disable all interrupts */
	dw_spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	dw_spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}

static inline void dw_spi_abort(struct spi_controller *ctlr)
{
	struct dw_spi *dws = spi_controller_get_devdata(ctlr);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	dw_spi_reset_chip(dws);
}

static void dw_spi_handle_err(struct spi_controller *ctlr,
			      struct spi_message *msg)
{
	dw_spi_abort(ctlr);
}

static int dw_spi_target_abort(struct spi_controller *ctlr)
{
	dw_spi_abort(ctlr);

	return 0;
}

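/* Clamp the SPI-mem read length to what the EEPROM-read NDF field can address */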
static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

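/*
 * Pack the opcode, address, dummy and (optional) outbound data bytes into a
 * single Tx buffer and set up the dws Tx/Rx bookkeeping for the memory op.
 */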
static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= DW_SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into the single
	 * buffer. If it's a transfer with data to be sent, also copy it into
	 * the single buffer in order to speed the data transmission up.
	 */
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

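/*
 * Push the whole Tx buffer out and then fetch the expected Rx data, keeping
 * pace with the hardware so the native CS stays asserted and the Rx FIFO
 * never overflows.
 */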
static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure
	 * otherwise the CS de-assertion will happen whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->ctlr->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent the Rx FIFO overflow causing the inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & DW_SPI_INT_RXOI) {
				dev_err(&dws->ctlr->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}

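/*
 * Wait for the controller to finish the memory operation by polling the
 * BUSY flag, sleeping between polls for roughly the time the remaining
 * Tx FIFO entries take on the wire.
 */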
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->ctlr->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	dw_spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	dw_spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for
 * devices which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if the
 * GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least at the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
	}

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	dw_spi_mask_intr(dws, 0xff);

	dw_spi_enable_chip(dws, 1);

	/*
	 * The DW APB SSI controller has very nasty peculiarities. First, in
	 * its original form (without any vendor-specific modifications) it
	 * doesn't provide a direct way to set and clear the native
	 * chip-select signal. Instead the controller asserts the CS lane if
	 * the Tx FIFO isn't empty and a transmission is going on, and
	 * automatically de-asserts it back to the high level if the Tx FIFO
	 * doesn't have anything to be pushed out. Due to that, multi-tasking
	 * or heavy IRQ activity might be fatal, since preemption of the
	 * transfer procedure may cause the Tx FIFO to run empty and the CS to
	 * be suddenly de-asserted, which in the middle of the transfer will
	 * most likely cause data loss. Secondly, the EEPROM-read and
	 * Read-only DW SPI transfer modes imply the incoming data being
	 * automatically pulled into the Rx FIFO. So if the driver software is
	 * late in fetching the data from the FIFO before it overflows, new
	 * incoming data will be lost. In order to make sure the executed
	 * memory operations are CS-atomic and to prevent the Rx FIFO overflow
	 * we have to disable the local interrupts so as to block any
	 * preemption during the subsequent IO operations.
	 *
	 * Note. In some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or a
	 * CPU not working fast enough, so the write-then-read algorithm
	 * implemented here just won't keep up with the SPI bus data transfer.
	 * Such a situation is highly platform specific and is supposed to be
	 * fixed by manually restricting the SPI bus frequency using the
	 * dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status
	 * only if there hasn't been any run-time error detected. In the
	 * former case it's just pointless. In the latter case it prevents an
	 * additional error message from being printed, since any hw error
	 * flag being set would be due to an error detected on the data
	 * transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the
 * DW SPI controller doesn't have an embedded dirmap interface. Note the
 * memory operations implemented in this driver are the best choice only for
 * the DW APB SSI controller with the standard native CS functionality. If a
 * hardware vendor has fixed the automatic CS assertion/de-assertion
 * peculiarity, then it will be safer to use the normal SPI-messages-based
 * transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct dw_spi_chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	dw_spi_reset_chip(dws);

	/*
	 * Retrieve the Synopsys component version if it hasn't been specified
	 * by the platform. The CoreKit version ID is encoded as a 3-char
	 * ASCII code enclosed with '*' (typical for most Synopsys IP cores).
	 */
	if (!dws->ver) {
		dws->ver = dw_readl(dws, DW_SPI_VERSION);

		dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
			dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
			DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
			DW_SPI_GET_BYTE(dws->ver, 1));
	}

	if (spi_controller_is_target(dws->ctlr)) {
		/* There is only one CS input signal in target mode */
		dws->num_cs = 1;
	} else {
		/*
		 * Try to detect the number of native chip-selects if the
		 * platform driver didn't set it up. There can be up to 16
		 * lines configured.
		 */
		if (!dws->num_cs) {
			u32 ser;

			dw_writel(dws, DW_SPI_SER, 0xffff);
			ser = dw_readl(dws, DW_SPI_SER);
			dw_writel(dws, DW_SPI_SER, 0);

			dws->num_cs = hweight16(ser);
		}
	}

	/*
	 * Try to detect the FIFO depth if it's not set by the interface
	 * driver; per the HW spec the depth can be anywhere from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect the CTRLR0.DFS field size and offset by testing the
	 * writability of its lowest bits. Note the DWC SSI controller also
	 * has the extended DFS, but with zero offset.
	 */
	if (dw_spi_ip_is(dws, PSSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		dw_spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		dw_spi_enable_chip(dws, 1);

		if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* Enable the HW fixup for explicit CS deselect for Amazon's Alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

static const struct spi_controller_mem_caps dw_spi_mem_caps = {
	.per_op_freq = true,
};

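/*
 * Probe-time entry point for the glue drivers: allocate and set up the SPI
 * controller, request the IRQ, initialize the optional DMA support and
 * register the controller with the SPI core.
 */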
int dw_spi_add_controller(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *ctlr;
	bool target;
	int ret;

	if (!dws)
		return -EINVAL;

	target = device_property_read_bool(dev, "spi-slave");
	if (target)
		ctlr = spi_alloc_target(dev, 0);
	else
		ctlr = spi_alloc_host(dev, 0);

	if (!ctlr)
		return -ENOMEM;

	device_set_node(&ctlr->dev, dev_fwnode(dev));

	dws->ctlr = ctlr;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(ctlr, dws);

	/* Basic HW init */
	dw_spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  ctlr);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_ctlr;
	}

	dw_spi_init_mem_ops(dws);

	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	if (dws->caps & DW_SPI_CAP_DFS32)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	ctlr->bus_num = dws->bus_num;
	ctlr->num_chipselect = dws->num_cs;
	ctlr->setup = dw_spi_setup;
	ctlr->cleanup = dw_spi_cleanup;
	ctlr->transfer_one = dw_spi_transfer_one;
	ctlr->handle_err = dw_spi_handle_err;
	ctlr->auto_runtime_pm = true;

	if (!target) {
		ctlr->use_gpio_descriptors = true;
		ctlr->mode_bits |= SPI_LOOP;
		if (dws->set_cs)
			ctlr->set_cs = dws->set_cs;
		else
			ctlr->set_cs = dw_spi_set_cs;
		if (dws->mem_ops.exec_op) {
			ctlr->mem_ops = &dws->mem_ops;
			ctlr->mem_caps = &dw_spi_mem_caps;
		}
		ctlr->max_speed_hz = dws->max_freq;
		ctlr->flags = SPI_CONTROLLER_GPIO_SS;
	} else {
		ctlr->target_abort = dw_spi_target_abort;
	}

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret == -EPROBE_DEFER) {
			goto err_free_irq;
		} else if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			ctlr->can_dma = dws->dma_ops->can_dma;
			ctlr->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err_probe(dev, ret, "problem registering spi controller\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	dw_spi_enable_chip(dws, 0);
err_free_irq:
	free_irq(dws->irq, ctlr);
err_free_ctlr:
	spi_controller_put(ctlr);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_controller, "SPI_DW_CORE");

void dw_spi_remove_controller(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->ctlr);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	dw_spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->ctlr);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_controller, "SPI_DW_CORE");

int dw_spi_suspend_controller(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->ctlr);
	if (ret)
		return ret;

	dw_spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_controller, "SPI_DW_CORE");

int dw_spi_resume_controller(struct dw_spi *dws)
{
	dw_spi_hw_init(&dws->ctlr->dev, dws);
	return spi_controller_resume(dws->ctlr);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_controller, "SPI_DW_CORE");

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");