// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

#define SPI_MAX_XFER			(SZ_64K - 64)

/* high-speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;

	int			mode;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;
};

static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);

static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);

	return (opflag & flag) != 0;
}

static inline bool spi_qup_is_dma_xfer(int mode)
{
	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
		return true;

	return false;
}

/* get the transaction length in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
	return controller->n_words * controller->w_size;
}

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

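/*
 * Move the QUP state machine to RESET, RUN or PAUSE. The hardware only
 * accepts a transition while QUP_STATE_VALID is set, so poll for that flag
 * both before and after writing the new state, giving up after
 * SPI_DELAY_RETRY attempts.
 */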
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 * 4 bytes: 0x12345678
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to the extra stickiness of the QUP_OP_IN_SERVICE_FLAG during
	 * block reads, it has to be cleared again at the very end. However,
	 * be sure to refresh the opflags value first, because
	 * MAX_INPUT_DONE_FLAG may now be present and is used to determine
	 * whether the transaction is complete.
	 */
	*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
	if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);
}

static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
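		/*
		 * Pack the outgoing bytes MSB-first into the 32-bit FIFO
		 * word, mirroring the unpacking in spi_qup_read_from_fifo().
		 */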
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}

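/*
 * Prepare and submit one slave-DMA descriptor for the given direction.
 * The callback may be NULL; spi_qup_do_dma() only registers a completion
 * callback on one of the two directions.
 */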
static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = master->dma_tx;
	else
		chan = master->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}

static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}

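/*
 * Run one transfer in DMA mode. Transfers larger than SPI_MAX_XFER are
 * split up: each pass programs the QUP for as many scatterlist entries as
 * fit in SPI_MAX_XFER bytes, issues the descriptors and waits for
 * completion before advancing to the remaining entries.
 */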
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents = 0, tx_nents = 0;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(master->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(master->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}

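/*
 * Run one transfer in PIO (FIFO or block) mode, splitting it into chunks
 * of at most SPI_MAX_XFER words. Each chunk is configured while the QUP is
 * in RESET, briefly PAUSEd to pre-fill the output FIFO in FIFO mode, and
 * then set to RUN; the interrupt handler services the FIFOs and signals
 * completion.
 */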
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * If the transaction is small enough, fall back to
		 * FIFO mode.
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}

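/*
 * Interrupt handler: read the QUP and SPI error flags and write them back
 * to acknowledge them, service the FIFOs when in PIO mode (DMA transfers
 * only need the service flags acknowledged), and complete the transfer on
 * MAX_INPUT_DONE or error.
 */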
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (spi_qup_is_dma_xfer(controller->mode)) {
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);
	}

	if ((opflags & QUP_OP_MAX_INPUT_DONE_FLAG) || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}

/* set the clock frequency and bits per word, and determine the transfer mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "loopback transfer too large: %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->master->can_dma &&
		 spi->master->can_dma(spi->master, spi, xfer) &&
		 spi->master->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prepare the QUP for the next SPI transaction of the chosen mode */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * For DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but
			 * one: a non-balanced transfer where there is
			 * only an rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */
		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

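	/*
	 * Rough upper bound on the transfer time: number of bits in this
	 * chunk (at most SPI_MAX_XFER bytes) divided by the bus rate, with
	 * a generous 100x safety margin.
	 */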
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(master, xfer);

	return ret;
}

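/*
 * A transfer is DMA-capable when the buffers in use are cache-aligned and
 * their DMA channels exist, the length is a block-size multiple on QUP v1,
 * and the data does not simply fit in the FIFO anyway.
 */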
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
	if (!IS_ERR_OR_NULL(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR_OR_NULL(master->dma_tx))
		dma_release_channel(master->dma_tx);
}

static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);
	return ret;
}

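/*
 * Manual chip-select control. The FORCE_CS bit overrides the controller's
 * automatic chip-select handling; write the register back only when the
 * value actually changes, to skip redundant MMIO writes.
 */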
static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_master_get_devdata(spi->master);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* this is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs if present and in range, otherwise the maximum */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		master->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

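	/*
	 * Decode the FIFO geometry advertised in QUP_IO_M_MODES: a block
	 * size field of N means N * 16 bytes (0 means 4 bytes), and each
	 * FIFO holds block size * 2^(field + 1) bytes.
	 */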
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* on earlier QUP versions, leave INPUT_OVER_RUN disabled */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			       QUP_ERROR_INPUT_UNDER_RUN |
			       QUP_ERROR_OUTPUT_UNDER_RUN,
			       base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	/* disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(device)) {
		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	}
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	spi_qup_release_dma(master);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
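
/*
 * Illustrative device tree node for this driver. The unit address, reg
 * range, interrupt and clock specifiers below are placeholders, not values
 * for any particular SoC; the clock-names and properties match what probe
 * actually looks up:
 *
 *	spi@78b6000 {
 *		compatible = "qcom,spi-qup-v2.2.1";
 *		reg = <0x078b6000 0x600>;
 *		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
 *			 <&gcc GCC_BLSP1_AHB_CLK>;
 *		clock-names = "core", "iface";
 *		spi-max-frequency = <50000000>;
 *		num-cs = <1>;
 *	};
 */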

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table	= spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");