// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					   memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
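
/*
 * Illustrative sketch (not part of the upstream file): how a controller
 * driver's ->exec_op() hook might use the map/unmap helpers above around a
 * DMA transfer. foo_exec_op() and foo_do_dma() are hypothetical names
 * standing in for controller-specific code.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_do_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */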

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					     memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	if (op->max_freq && mem->spi->controller->min_speed_hz &&
	    op->max_freq < mem->spi->controller->min_speed_hz)
		return false;

	if (op->max_freq &&
	    op->max_freq < mem->spi->max_speed_hz) {
		if (!spi_mem_controller_is_capable(ctlr, per_op_freq))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
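
/*
 * Illustrative sketch (not part of the upstream file): a controller driver
 * can layer its own restrictions on top of the default check and fall back
 * to it for everything else. foo_supports_op() and the 4-byte address limit
 * are hypothetical.
 *
 *	static bool foo_supports_op(struct spi_mem *mem,
 *				    const struct spi_mem_op *op)
 *	{
 *		if (op->addr.nbytes > 4)
 *			return false;
 *
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */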

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support single or dual I/O, others might only support
 * specific opcodes, and it can even be that the controller and device both
 * support quad I/O but the hardware prevents you from using it because only
 * two I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* Make sure the operation frequency is correct before going further */
	spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);

	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
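
/*
 * Illustrative sketch (not part of the upstream file): a SPI memory driver
 * probing for the fastest supported read variant, downgrading from quad
 * output (opcode 0x6b) to plain single-I/O read (opcode 0x03) when the setup
 * cannot do quad. len and buf are placeholders.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *					  SPI_MEM_OP_ADDR(3, 0, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op)) {
 *		op.cmd.opcode = 0x03;
 *		op.dummy.nbytes = 0;
 *		op.data.buswidth = 1;
 *	}
 */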

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

static void spi_mem_add_op_stats(struct spi_statistics __percpu *pcpu_stats,
				 const struct spi_mem_op *op, int exec_op_ret)
{
	struct spi_statistics *stats;
	u64 len, l2len;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	/*
	 * We do not have the concept of messages or transfers. Let's consider
	 * that one operation is equivalent to one message and one transfer.
	 */
	u64_stats_inc(&stats->messages);
	u64_stats_inc(&stats->transfers);

	/* Use the sum of all lengths as bytes count and histogram value. */
	len = op->cmd.nbytes + op->addr.nbytes;
	len += op->dummy.nbytes + op->data.nbytes;
	u64_stats_add(&stats->bytes, len);
	l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	/* Only account for data bytes as transferred bytes. */
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		u64_stats_add(&stats->bytes_tx, op->data.nbytes);
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		u64_stats_add(&stats->bytes_rx, op->data.nbytes);

	/*
	 * A timeout is not an error, following the same behavior as
	 * spi_transfer_one_message().
	 */
	if (exec_op_ret == -ETIMEDOUT)
		u64_stats_inc(&stats->timedout);
	else if (exec_op_ret)
		u64_stats_inc(&stats->errors);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	/* Make sure the operation frequency is correct before going further */
	spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);

	dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n",
		 op->cmd.opcode,
		 op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0),
		 op->dummy.nbytes,
		 op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "),
		 op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S',
		 op->addr.buswidth, op->addr.dtr ? 'D' : 'S',
		 op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S',
		 op->data.buswidth, op->data.dtr ? 'D' : 'S',
		 op->max_freq ? op->max_freq : mem->spi->max_speed_hz);

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -EOPNOTSUPP;

	if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		trace_spi_mem_start_op(mem, op);
		ret = ctlr->mem_ops->exec_op(mem, op);
		trace_spi_mem_stop_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
			spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
			spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);

			return ret;
		}
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	xfers[xferpos].speed_hz = op->max_freq;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
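
/*
 * Illustrative sketch (not part of the upstream file): reading a flash JEDEC
 * ID (opcode 0x9f) through spi_mem_exec_op(). The buffer is kmalloc'ed
 * because, as spi_mem_check_op() enforces, data buffers must be DMA-able and
 * must not live on the stack.
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret;
 *
 *	if (!id)
 *		return -ENOMEM;
 *
 *	ret = spi_mem_exec_op(mem, &op);
 *	...
 *	kfree(id);
 */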

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
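
/*
 * Illustrative sketch (not part of the upstream file): the caller-side
 * pattern used by upper layers such as SPI-NOR, splitting a large read into
 * chunks the controller accepts. op, buf and len are placeholders.
 *
 *	while (len) {
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */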

/**
 * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
 *			      match controller, PCB and chip limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some chips have per-op frequency limitations and must adapt the maximum
 * speed. This function allows SPI mem drivers to set @op->max_freq to the
 * maximum supported value.
 */
void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz)
		op->max_freq = mem->spi->max_speed_hz;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
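
/*
 * Illustrative sketch (not part of the upstream file): declaring an
 * operation that a datasheet caps at 50 MHz, assuming the
 * SPI_MEM_OP_MAX_FREQ() initializer from spi-mem.h. spi_mem_adjust_op_freq()
 * then lowers op.max_freq further if the controller or board cannot reach
 * 50 MHz. len and buf are placeholders.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x5a, 1),
 *					  SPI_MEM_OP_ADDR(3, 0, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 1),
 *					  SPI_MEM_OP_MAX_FREQ(50000000));
 */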

/**
 * spi_mem_calc_op_duration() - Derive the theoretical duration (in ns) of an
 *				operation. This helps finding the best variant
 *				among a list of possible choices.
 * @mem: the SPI memory
 * @op: the operation to benchmark
 *
 * Some chips have per-op frequency limitations, PCBs usually have their own
 * limitations as well, and controllers can support dual, quad or even octal
 * modes, sometimes in DTR. All these combinations make it impossible to
 * statically list the best combination for all situations. If we want
 * something accurate, all these combinations should be rated (e.g. with a
 * time estimate) and the best pick should be taken based on these
 * calculations.
 *
 * Return: the operation duration estimate, in ns. If no frequency limit has
 * been set, the raw number of cycles is returned instead, which still lets
 * callers compare which operation would be the fastest at iso-frequency.
 */
u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op)
{
	u64 ncycles = 0;
	u64 ps_per_cycles, duration;

	spi_mem_adjust_op_freq(mem, op);

	if (op->max_freq) {
		ps_per_cycles = 1000000000000ULL;
		do_div(ps_per_cycles, op->max_freq);
	} else {
		/* In this case, the unit is no longer a time unit */
		ps_per_cycles = 1;
	}

	ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
	ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);

	/* Dummy bytes are optional for some SPI flash memory operations */
	if (op->dummy.nbytes)
		ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);

	ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);

	/* Derive the duration in ps */
	duration = ncycles * ps_per_cycles;
	/* Convert into ns */
	do_div(duration, 1000);

	return duration;
}
EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
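
/*
 * Worked example for the formula above: a 1-1-4 read of 256 data bytes
 * (1 opcode byte, 3 address bytes and 1 dummy byte on one line, data on four
 * lines, no DTR) costs 8 + 24 + 8 + (256 * 8 / 4) = 552 cycles. At 100 MHz a
 * cycle lasts 10 ns, so the estimate is 5520 ns.
 */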

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -EOPNOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
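
/*
 * Illustrative sketch (not part of the upstream file): creating a read
 * dirmap over a 16 MiB flash and reading through it. The template
 * deliberately leaves the address value and the data buffer/length unset;
 * they are filled in on each access. len and buf are placeholders.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *	ssize_t nread;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *
 *	nread = spi_mem_dirmap_read(desc, 0, len, buf);
 */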

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *	   -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
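
/*
 * Illustrative sketch (not part of the upstream file): waiting for a NOR
 * flash to finish an erase by polling the WIP bit (bit 0) of the status
 * register (opcode 0x05) until it clears, with a 100 us polling period and a
 * 400 ms timeout. status must point to DMA-able memory.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(1, status, 1));
 *
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 100, 400);
 */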

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
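
/*
 * Illustrative sketch (not part of the upstream file): a minimal SPI memory
 * driver. Most drivers use the spi_mem_driver_register() or
 * module_spi_mem_driver() wrappers from spi-mem.h rather than calling the
 * _with_owner variant directly. foo_probe() and "foo" are hypothetical.
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo",
 *			},
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */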

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);