Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
fork
Configure Feed
Select the types of activity you want to include in your feed.
1// SPDX-License-Identifier: GPL-2.0-or-later
2// SPI init/core code
3//
4// Copyright (C) 2005 David Brownell
5// Copyright (C) 2008 Secret Lab Technologies Ltd.
6
7#include <linux/kernel.h>
8#include <linux/device.h>
9#include <linux/init.h>
10#include <linux/cache.h>
11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h>
13#include <linux/mutex.h>
14#include <linux/of_device.h>
15#include <linux/of_irq.h>
16#include <linux/clk/clk-conf.h>
17#include <linux/slab.h>
18#include <linux/mod_devicetable.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/spi-mem.h>
21#include <linux/gpio/consumer.h>
22#include <linux/pm_runtime.h>
23#include <linux/pm_domain.h>
24#include <linux/property.h>
25#include <linux/export.h>
26#include <linux/sched/rt.h>
27#include <uapi/linux/sched/types.h>
28#include <linux/delay.h>
29#include <linux/kthread.h>
30#include <linux/ioport.h>
31#include <linux/acpi.h>
32#include <linux/highmem.h>
33#include <linux/idr.h>
34#include <linux/platform_data/x86/apple.h>
35#include <linux/ptp_clock_kernel.h>
36#include <linux/percpu.h>
37
38#define CREATE_TRACE_POINTS
39#include <trace/events/spi.h>
40EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
41EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
42
43#include "internals.h"
44
45static DEFINE_IDR(spi_master_idr);
46
47static void spidev_release(struct device *dev)
48{
49 struct spi_device *spi = to_spi_device(dev);
50
51 spi_controller_put(spi->controller);
52 kfree(spi->driver_override);
53 free_percpu(spi->pcpu_statistics);
54 kfree(spi);
55}
56
57static ssize_t
58modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59{
60 const struct spi_device *spi = to_spi_device(dev);
61 int len;
62
63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 if (len != -ENODEV)
65 return len;
66
67 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68}
69static DEVICE_ATTR_RO(modalias);
70
71static ssize_t driver_override_store(struct device *dev,
72 struct device_attribute *a,
73 const char *buf, size_t count)
74{
75 struct spi_device *spi = to_spi_device(dev);
76 int ret;
77
78 ret = driver_set_override(dev, &spi->driver_override, buf, count);
79 if (ret)
80 return ret;
81
82 return count;
83}
84
85static ssize_t driver_override_show(struct device *dev,
86 struct device_attribute *a, char *buf)
87{
88 const struct spi_device *spi = to_spi_device(dev);
89 ssize_t len;
90
91 device_lock(dev);
92 len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
93 device_unlock(dev);
94 return len;
95}
96static DEVICE_ATTR_RW(driver_override);
97
98static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
99{
100 struct spi_statistics __percpu *pcpu_stats;
101
102 if (dev)
103 pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
104 else
105 pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
106
107 if (pcpu_stats) {
108 int cpu;
109
110 for_each_possible_cpu(cpu) {
111 struct spi_statistics *stat;
112
113 stat = per_cpu_ptr(pcpu_stats, cpu);
114 u64_stats_init(&stat->syncp);
115 }
116 }
117 return pcpu_stats;
118}
119
120#define spi_pcpu_stats_totalize(ret, in, field) \
121do { \
122 int i; \
123 ret = 0; \
124 for_each_possible_cpu(i) { \
125 const struct spi_statistics *pcpu_stats; \
126 u64 inc; \
127 unsigned int start; \
128 pcpu_stats = per_cpu_ptr(in, i); \
129 do { \
130 start = u64_stats_fetch_begin( \
131 &pcpu_stats->syncp); \
132 inc = u64_stats_read(&pcpu_stats->field); \
133 } while (u64_stats_fetch_retry( \
134 &pcpu_stats->syncp, start)); \
135 ret += inc; \
136 } \
137} while (0)
138
139#define SPI_STATISTICS_ATTRS(field, file) \
140static ssize_t spi_controller_##field##_show(struct device *dev, \
141 struct device_attribute *attr, \
142 char *buf) \
143{ \
144 struct spi_controller *ctlr = container_of(dev, \
145 struct spi_controller, dev); \
146 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
147} \
148static struct device_attribute dev_attr_spi_controller_##field = { \
149 .attr = { .name = file, .mode = 0444 }, \
150 .show = spi_controller_##field##_show, \
151}; \
152static ssize_t spi_device_##field##_show(struct device *dev, \
153 struct device_attribute *attr, \
154 char *buf) \
155{ \
156 struct spi_device *spi = to_spi_device(dev); \
157 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
158} \
159static struct device_attribute dev_attr_spi_device_##field = { \
160 .attr = { .name = file, .mode = 0444 }, \
161 .show = spi_device_##field##_show, \
162}
163
164#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
165static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
166 char *buf) \
167{ \
168 ssize_t len; \
169 u64 val; \
170 spi_pcpu_stats_totalize(val, stat, field); \
171 len = sysfs_emit(buf, "%llu\n", val); \
172 return len; \
173} \
174SPI_STATISTICS_ATTRS(name, file)
175
176#define SPI_STATISTICS_SHOW(field) \
177 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
178 field)
179
180SPI_STATISTICS_SHOW(messages);
181SPI_STATISTICS_SHOW(transfers);
182SPI_STATISTICS_SHOW(errors);
183SPI_STATISTICS_SHOW(timedout);
184
185SPI_STATISTICS_SHOW(spi_sync);
186SPI_STATISTICS_SHOW(spi_sync_immediate);
187SPI_STATISTICS_SHOW(spi_async);
188
189SPI_STATISTICS_SHOW(bytes);
190SPI_STATISTICS_SHOW(bytes_rx);
191SPI_STATISTICS_SHOW(bytes_tx);
192
193#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
194 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
195 "transfer_bytes_histo_" number, \
196 transfer_bytes_histo[index])
197SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
198SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
199SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
200SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
201SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
202SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
203SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
204SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
205SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
206SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
207SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
208SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
209SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
210SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
211SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
212SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
213SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
214
215SPI_STATISTICS_SHOW(transfers_split_maxsize);
216
217static struct attribute *spi_dev_attrs[] = {
218 &dev_attr_modalias.attr,
219 &dev_attr_driver_override.attr,
220 NULL,
221};
222
223static const struct attribute_group spi_dev_group = {
224 .attrs = spi_dev_attrs,
225};
226
227static struct attribute *spi_device_statistics_attrs[] = {
228 &dev_attr_spi_device_messages.attr,
229 &dev_attr_spi_device_transfers.attr,
230 &dev_attr_spi_device_errors.attr,
231 &dev_attr_spi_device_timedout.attr,
232 &dev_attr_spi_device_spi_sync.attr,
233 &dev_attr_spi_device_spi_sync_immediate.attr,
234 &dev_attr_spi_device_spi_async.attr,
235 &dev_attr_spi_device_bytes.attr,
236 &dev_attr_spi_device_bytes_rx.attr,
237 &dev_attr_spi_device_bytes_tx.attr,
238 &dev_attr_spi_device_transfer_bytes_histo0.attr,
239 &dev_attr_spi_device_transfer_bytes_histo1.attr,
240 &dev_attr_spi_device_transfer_bytes_histo2.attr,
241 &dev_attr_spi_device_transfer_bytes_histo3.attr,
242 &dev_attr_spi_device_transfer_bytes_histo4.attr,
243 &dev_attr_spi_device_transfer_bytes_histo5.attr,
244 &dev_attr_spi_device_transfer_bytes_histo6.attr,
245 &dev_attr_spi_device_transfer_bytes_histo7.attr,
246 &dev_attr_spi_device_transfer_bytes_histo8.attr,
247 &dev_attr_spi_device_transfer_bytes_histo9.attr,
248 &dev_attr_spi_device_transfer_bytes_histo10.attr,
249 &dev_attr_spi_device_transfer_bytes_histo11.attr,
250 &dev_attr_spi_device_transfer_bytes_histo12.attr,
251 &dev_attr_spi_device_transfer_bytes_histo13.attr,
252 &dev_attr_spi_device_transfer_bytes_histo14.attr,
253 &dev_attr_spi_device_transfer_bytes_histo15.attr,
254 &dev_attr_spi_device_transfer_bytes_histo16.attr,
255 &dev_attr_spi_device_transfers_split_maxsize.attr,
256 NULL,
257};
258
259static const struct attribute_group spi_device_statistics_group = {
260 .name = "statistics",
261 .attrs = spi_device_statistics_attrs,
262};
263
264static const struct attribute_group *spi_dev_groups[] = {
265 &spi_dev_group,
266 &spi_device_statistics_group,
267 NULL,
268};
269
270static struct attribute *spi_controller_statistics_attrs[] = {
271 &dev_attr_spi_controller_messages.attr,
272 &dev_attr_spi_controller_transfers.attr,
273 &dev_attr_spi_controller_errors.attr,
274 &dev_attr_spi_controller_timedout.attr,
275 &dev_attr_spi_controller_spi_sync.attr,
276 &dev_attr_spi_controller_spi_sync_immediate.attr,
277 &dev_attr_spi_controller_spi_async.attr,
278 &dev_attr_spi_controller_bytes.attr,
279 &dev_attr_spi_controller_bytes_rx.attr,
280 &dev_attr_spi_controller_bytes_tx.attr,
281 &dev_attr_spi_controller_transfer_bytes_histo0.attr,
282 &dev_attr_spi_controller_transfer_bytes_histo1.attr,
283 &dev_attr_spi_controller_transfer_bytes_histo2.attr,
284 &dev_attr_spi_controller_transfer_bytes_histo3.attr,
285 &dev_attr_spi_controller_transfer_bytes_histo4.attr,
286 &dev_attr_spi_controller_transfer_bytes_histo5.attr,
287 &dev_attr_spi_controller_transfer_bytes_histo6.attr,
288 &dev_attr_spi_controller_transfer_bytes_histo7.attr,
289 &dev_attr_spi_controller_transfer_bytes_histo8.attr,
290 &dev_attr_spi_controller_transfer_bytes_histo9.attr,
291 &dev_attr_spi_controller_transfer_bytes_histo10.attr,
292 &dev_attr_spi_controller_transfer_bytes_histo11.attr,
293 &dev_attr_spi_controller_transfer_bytes_histo12.attr,
294 &dev_attr_spi_controller_transfer_bytes_histo13.attr,
295 &dev_attr_spi_controller_transfer_bytes_histo14.attr,
296 &dev_attr_spi_controller_transfer_bytes_histo15.attr,
297 &dev_attr_spi_controller_transfer_bytes_histo16.attr,
298 &dev_attr_spi_controller_transfers_split_maxsize.attr,
299 NULL,
300};
301
302static const struct attribute_group spi_controller_statistics_group = {
303 .name = "statistics",
304 .attrs = spi_controller_statistics_attrs,
305};
306
307static const struct attribute_group *spi_master_groups[] = {
308 &spi_controller_statistics_group,
309 NULL,
310};
311
312static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
313 struct spi_transfer *xfer,
314 struct spi_controller *ctlr)
315{
316 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
317 struct spi_statistics *stats;
318
319 if (l2len < 0)
320 l2len = 0;
321
322 get_cpu();
323 stats = this_cpu_ptr(pcpu_stats);
324 u64_stats_update_begin(&stats->syncp);
325
326 u64_stats_inc(&stats->transfers);
327 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
328
329 u64_stats_add(&stats->bytes, xfer->len);
330 if ((xfer->tx_buf) &&
331 (xfer->tx_buf != ctlr->dummy_tx))
332 u64_stats_add(&stats->bytes_tx, xfer->len);
333 if ((xfer->rx_buf) &&
334 (xfer->rx_buf != ctlr->dummy_rx))
335 u64_stats_add(&stats->bytes_rx, xfer->len);
336
337 u64_stats_update_end(&stats->syncp);
338 put_cpu();
339}
340
341/*
342 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
343 * and the sysfs version makes coldplug work too.
344 */
345static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
346{
347 while (id->name[0]) {
348 if (!strcmp(name, id->name))
349 return id;
350 id++;
351 }
352 return NULL;
353}
354
355const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
356{
357 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
358
359 return spi_match_id(sdrv->id_table, sdev->modalias);
360}
361EXPORT_SYMBOL_GPL(spi_get_device_id);
362
363const void *spi_get_device_match_data(const struct spi_device *sdev)
364{
365 const void *match;
366
367 match = device_get_match_data(&sdev->dev);
368 if (match)
369 return match;
370
371 return (const void *)spi_get_device_id(sdev)->driver_data;
372}
373EXPORT_SYMBOL_GPL(spi_get_device_match_data);
374
375static int spi_match_device(struct device *dev, struct device_driver *drv)
376{
377 const struct spi_device *spi = to_spi_device(dev);
378 const struct spi_driver *sdrv = to_spi_driver(drv);
379
380 /* Check override first, and if set, only use the named driver */
381 if (spi->driver_override)
382 return strcmp(spi->driver_override, drv->name) == 0;
383
384 /* Attempt an OF style match */
385 if (of_driver_match_device(dev, drv))
386 return 1;
387
388 /* Then try ACPI */
389 if (acpi_driver_match_device(dev, drv))
390 return 1;
391
392 if (sdrv->id_table)
393 return !!spi_match_id(sdrv->id_table, spi->modalias);
394
395 return strcmp(spi->modalias, drv->name) == 0;
396}
397
398static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
399{
400 const struct spi_device *spi = to_spi_device(dev);
401 int rc;
402
403 rc = acpi_device_uevent_modalias(dev, env);
404 if (rc != -ENODEV)
405 return rc;
406
407 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
408}
409
410static int spi_probe(struct device *dev)
411{
412 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
413 struct spi_device *spi = to_spi_device(dev);
414 int ret;
415
416 ret = of_clk_set_defaults(dev->of_node, false);
417 if (ret)
418 return ret;
419
420 if (dev->of_node) {
421 spi->irq = of_irq_get(dev->of_node, 0);
422 if (spi->irq == -EPROBE_DEFER)
423 return -EPROBE_DEFER;
424 if (spi->irq < 0)
425 spi->irq = 0;
426 }
427
428 ret = dev_pm_domain_attach(dev, true);
429 if (ret)
430 return ret;
431
432 if (sdrv->probe) {
433 ret = sdrv->probe(spi);
434 if (ret)
435 dev_pm_domain_detach(dev, true);
436 }
437
438 return ret;
439}
440
441static void spi_remove(struct device *dev)
442{
443 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
444
445 if (sdrv->remove)
446 sdrv->remove(to_spi_device(dev));
447
448 dev_pm_domain_detach(dev, true);
449}
450
451static void spi_shutdown(struct device *dev)
452{
453 if (dev->driver) {
454 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
455
456 if (sdrv->shutdown)
457 sdrv->shutdown(to_spi_device(dev));
458 }
459}
460
461struct bus_type spi_bus_type = {
462 .name = "spi",
463 .dev_groups = spi_dev_groups,
464 .match = spi_match_device,
465 .uevent = spi_uevent,
466 .probe = spi_probe,
467 .remove = spi_remove,
468 .shutdown = spi_shutdown,
469};
470EXPORT_SYMBOL_GPL(spi_bus_type);
471
472/**
473 * __spi_register_driver - register a SPI driver
474 * @owner: owner module of the driver to register
475 * @sdrv: the driver to register
476 * Context: can sleep
477 *
478 * Return: zero on success, else a negative error code.
479 */
480int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
481{
482 sdrv->driver.owner = owner;
483 sdrv->driver.bus = &spi_bus_type;
484
485 /*
486 * For Really Good Reasons we use spi: modaliases not of:
487 * modaliases for DT so module autoloading won't work if we
488 * don't have a spi_device_id as well as a compatible string.
489 */
490 if (sdrv->driver.of_match_table) {
491 const struct of_device_id *of_id;
492
493 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
494 of_id++) {
495 const char *of_name;
496
497 /* Strip off any vendor prefix */
498 of_name = strnchr(of_id->compatible,
499 sizeof(of_id->compatible), ',');
500 if (of_name)
501 of_name++;
502 else
503 of_name = of_id->compatible;
504
505 if (sdrv->id_table) {
506 const struct spi_device_id *spi_id;
507
508 spi_id = spi_match_id(sdrv->id_table, of_name);
509 if (spi_id)
510 continue;
511 } else {
512 if (strcmp(sdrv->driver.name, of_name) == 0)
513 continue;
514 }
515
516 pr_warn("SPI driver %s has no spi_device_id for %s\n",
517 sdrv->driver.name, of_id->compatible);
518 }
519 }
520
521 return driver_register(&sdrv->driver);
522}
523EXPORT_SYMBOL_GPL(__spi_register_driver);
524
525/*-------------------------------------------------------------------------*/
526
527/*
528 * SPI devices should normally not be created by SPI device drivers; that
529 * would make them board-specific. Similarly with SPI controller drivers.
530 * Device registration normally goes into like arch/.../mach.../board-YYY.c
531 * with other readonly (flashable) information about mainboard devices.
532 */
533
534struct boardinfo {
535 struct list_head list;
536 struct spi_board_info board_info;
537};
538
539static LIST_HEAD(board_list);
540static LIST_HEAD(spi_controller_list);
541
542/*
543 * Used to protect add/del operation for board_info list and
544 * spi_controller list, and their matching process also used
545 * to protect object of type struct idr.
546 */
547static DEFINE_MUTEX(board_lock);
548
549/**
550 * spi_alloc_device - Allocate a new SPI device
551 * @ctlr: Controller to which device is connected
552 * Context: can sleep
553 *
554 * Allows a driver to allocate and initialize a spi_device without
555 * registering it immediately. This allows a driver to directly
556 * fill the spi_device with device parameters before calling
557 * spi_add_device() on it.
558 *
559 * Caller is responsible to call spi_add_device() on the returned
560 * spi_device structure to add it to the SPI controller. If the caller
561 * needs to discard the spi_device without adding it, then it should
562 * call spi_dev_put() on it.
563 *
564 * Return: a pointer to the new device, or NULL.
565 */
566struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
567{
568 struct spi_device *spi;
569
570 if (!spi_controller_get(ctlr))
571 return NULL;
572
573 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
574 if (!spi) {
575 spi_controller_put(ctlr);
576 return NULL;
577 }
578
579 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
580 if (!spi->pcpu_statistics) {
581 kfree(spi);
582 spi_controller_put(ctlr);
583 return NULL;
584 }
585
586 spi->master = spi->controller = ctlr;
587 spi->dev.parent = &ctlr->dev;
588 spi->dev.bus = &spi_bus_type;
589 spi->dev.release = spidev_release;
590 spi->mode = ctlr->buswidth_override_bits;
591
592 device_initialize(&spi->dev);
593 return spi;
594}
595EXPORT_SYMBOL_GPL(spi_alloc_device);
596
597static void spi_dev_set_name(struct spi_device *spi)
598{
599 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
600
601 if (adev) {
602 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
603 return;
604 }
605
606 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
607 spi->chip_select);
608}
609
610static int spi_dev_check(struct device *dev, void *data)
611{
612 struct spi_device *spi = to_spi_device(dev);
613 struct spi_device *new_spi = data;
614
615 if (spi->controller == new_spi->controller &&
616 spi->chip_select == new_spi->chip_select)
617 return -EBUSY;
618 return 0;
619}
620
621static void spi_cleanup(struct spi_device *spi)
622{
623 if (spi->controller->cleanup)
624 spi->controller->cleanup(spi);
625}
626
627static int __spi_add_device(struct spi_device *spi)
628{
629 struct spi_controller *ctlr = spi->controller;
630 struct device *dev = ctlr->dev.parent;
631 int status;
632
633 /*
634 * We need to make sure there's no other device with this
635 * chipselect **BEFORE** we call setup(), else we'll trash
636 * its configuration.
637 */
638 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
639 if (status) {
640 dev_err(dev, "chipselect %d already in use\n",
641 spi->chip_select);
642 return status;
643 }
644
645 /* Controller may unregister concurrently */
646 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
647 !device_is_registered(&ctlr->dev)) {
648 return -ENODEV;
649 }
650
651 if (ctlr->cs_gpiods)
652 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
653
654 /*
655 * Drivers may modify this initial i/o setup, but will
656 * normally rely on the device being setup. Devices
657 * using SPI_CS_HIGH can't coexist well otherwise...
658 */
659 status = spi_setup(spi);
660 if (status < 0) {
661 dev_err(dev, "can't setup %s, status %d\n",
662 dev_name(&spi->dev), status);
663 return status;
664 }
665
666 /* Device may be bound to an active driver when this returns */
667 status = device_add(&spi->dev);
668 if (status < 0) {
669 dev_err(dev, "can't add %s, status %d\n",
670 dev_name(&spi->dev), status);
671 spi_cleanup(spi);
672 } else {
673 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
674 }
675
676 return status;
677}
678
679/**
680 * spi_add_device - Add spi_device allocated with spi_alloc_device
681 * @spi: spi_device to register
682 *
683 * Companion function to spi_alloc_device. Devices allocated with
684 * spi_alloc_device can be added onto the spi bus with this function.
685 *
686 * Return: 0 on success; negative errno on failure
687 */
688int spi_add_device(struct spi_device *spi)
689{
690 struct spi_controller *ctlr = spi->controller;
691 struct device *dev = ctlr->dev.parent;
692 int status;
693
694 /* Chipselects are numbered 0..max; validate. */
695 if (spi->chip_select >= ctlr->num_chipselect) {
696 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
697 ctlr->num_chipselect);
698 return -EINVAL;
699 }
700
701 /* Set the bus ID string */
702 spi_dev_set_name(spi);
703
704 mutex_lock(&ctlr->add_lock);
705 status = __spi_add_device(spi);
706 mutex_unlock(&ctlr->add_lock);
707 return status;
708}
709EXPORT_SYMBOL_GPL(spi_add_device);
710
711static int spi_add_device_locked(struct spi_device *spi)
712{
713 struct spi_controller *ctlr = spi->controller;
714 struct device *dev = ctlr->dev.parent;
715
716 /* Chipselects are numbered 0..max; validate. */
717 if (spi->chip_select >= ctlr->num_chipselect) {
718 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
719 ctlr->num_chipselect);
720 return -EINVAL;
721 }
722
723 /* Set the bus ID string */
724 spi_dev_set_name(spi);
725
726 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
727 return __spi_add_device(spi);
728}
729
730/**
731 * spi_new_device - instantiate one new SPI device
732 * @ctlr: Controller to which device is connected
733 * @chip: Describes the SPI device
734 * Context: can sleep
735 *
736 * On typical mainboards, this is purely internal; and it's not needed
737 * after board init creates the hard-wired devices. Some development
738 * platforms may not be able to use spi_register_board_info though, and
739 * this is exported so that for example a USB or parport based adapter
740 * driver could add devices (which it would learn about out-of-band).
741 *
742 * Return: the new device, or NULL.
743 */
744struct spi_device *spi_new_device(struct spi_controller *ctlr,
745 struct spi_board_info *chip)
746{
747 struct spi_device *proxy;
748 int status;
749
750 /*
751 * NOTE: caller did any chip->bus_num checks necessary.
752 *
753 * Also, unless we change the return value convention to use
754 * error-or-pointer (not NULL-or-pointer), troubleshootability
755 * suggests syslogged diagnostics are best here (ugh).
756 */
757
758 proxy = spi_alloc_device(ctlr);
759 if (!proxy)
760 return NULL;
761
762 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
763
764 proxy->chip_select = chip->chip_select;
765 proxy->max_speed_hz = chip->max_speed_hz;
766 proxy->mode = chip->mode;
767 proxy->irq = chip->irq;
768 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
769 proxy->dev.platform_data = (void *) chip->platform_data;
770 proxy->controller_data = chip->controller_data;
771 proxy->controller_state = NULL;
772
773 if (chip->swnode) {
774 status = device_add_software_node(&proxy->dev, chip->swnode);
775 if (status) {
776 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
777 chip->modalias, status);
778 goto err_dev_put;
779 }
780 }
781
782 status = spi_add_device(proxy);
783 if (status < 0)
784 goto err_dev_put;
785
786 return proxy;
787
788err_dev_put:
789 device_remove_software_node(&proxy->dev);
790 spi_dev_put(proxy);
791 return NULL;
792}
793EXPORT_SYMBOL_GPL(spi_new_device);
794
795/**
796 * spi_unregister_device - unregister a single SPI device
797 * @spi: spi_device to unregister
798 *
799 * Start making the passed SPI device vanish. Normally this would be handled
800 * by spi_unregister_controller().
801 */
802void spi_unregister_device(struct spi_device *spi)
803{
804 if (!spi)
805 return;
806
807 if (spi->dev.of_node) {
808 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
809 of_node_put(spi->dev.of_node);
810 }
811 if (ACPI_COMPANION(&spi->dev))
812 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
813 device_remove_software_node(&spi->dev);
814 device_del(&spi->dev);
815 spi_cleanup(spi);
816 put_device(&spi->dev);
817}
818EXPORT_SYMBOL_GPL(spi_unregister_device);
819
820static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
821 struct spi_board_info *bi)
822{
823 struct spi_device *dev;
824
825 if (ctlr->bus_num != bi->bus_num)
826 return;
827
828 dev = spi_new_device(ctlr, bi);
829 if (!dev)
830 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
831 bi->modalias);
832}
833
834/**
835 * spi_register_board_info - register SPI devices for a given board
836 * @info: array of chip descriptors
837 * @n: how many descriptors are provided
838 * Context: can sleep
839 *
840 * Board-specific early init code calls this (probably during arch_initcall)
841 * with segments of the SPI device table. Any device nodes are created later,
842 * after the relevant parent SPI controller (bus_num) is defined. We keep
843 * this table of devices forever, so that reloading a controller driver will
844 * not make Linux forget about these hard-wired devices.
845 *
846 * Other code can also call this, e.g. a particular add-on board might provide
847 * SPI devices through its expansion connector, so code initializing that board
848 * would naturally declare its SPI devices.
849 *
850 * The board info passed can safely be __initdata ... but be careful of
851 * any embedded pointers (platform_data, etc), they're copied as-is.
852 *
853 * Return: zero on success, else a negative error code.
854 */
855int spi_register_board_info(struct spi_board_info const *info, unsigned n)
856{
857 struct boardinfo *bi;
858 int i;
859
860 if (!n)
861 return 0;
862
863 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
864 if (!bi)
865 return -ENOMEM;
866
867 for (i = 0; i < n; i++, bi++, info++) {
868 struct spi_controller *ctlr;
869
870 memcpy(&bi->board_info, info, sizeof(*info));
871
872 mutex_lock(&board_lock);
873 list_add_tail(&bi->list, &board_list);
874 list_for_each_entry(ctlr, &spi_controller_list, list)
875 spi_match_controller_to_boardinfo(ctlr,
876 &bi->board_info);
877 mutex_unlock(&board_lock);
878 }
879
880 return 0;
881}
882
883/*-------------------------------------------------------------------------*/
884
885/* Core methods for SPI resource management */
886
887/**
888 * spi_res_alloc - allocate a spi resource that is life-cycle managed
889 * during the processing of a spi_message while using
890 * spi_transfer_one
891 * @spi: the spi device for which we allocate memory
892 * @release: the release code to execute for this resource
893 * @size: size to alloc and return
894 * @gfp: GFP allocation flags
895 *
896 * Return: the pointer to the allocated data
897 *
898 * This may get enhanced in the future to allocate from a memory pool
899 * of the @spi_device or @spi_controller to avoid repeated allocations.
900 */
901static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
902 size_t size, gfp_t gfp)
903{
904 struct spi_res *sres;
905
906 sres = kzalloc(sizeof(*sres) + size, gfp);
907 if (!sres)
908 return NULL;
909
910 INIT_LIST_HEAD(&sres->entry);
911 sres->release = release;
912
913 return sres->data;
914}
915
916/**
917 * spi_res_free - free an spi resource
918 * @res: pointer to the custom data of a resource
919 */
920static void spi_res_free(void *res)
921{
922 struct spi_res *sres = container_of(res, struct spi_res, data);
923
924 if (!res)
925 return;
926
927 WARN_ON(!list_empty(&sres->entry));
928 kfree(sres);
929}
930
931/**
932 * spi_res_add - add a spi_res to the spi_message
933 * @message: the spi message
934 * @res: the spi_resource
935 */
936static void spi_res_add(struct spi_message *message, void *res)
937{
938 struct spi_res *sres = container_of(res, struct spi_res, data);
939
940 WARN_ON(!list_empty(&sres->entry));
941 list_add_tail(&sres->entry, &message->resources);
942}
943
944/**
945 * spi_res_release - release all spi resources for this message
946 * @ctlr: the @spi_controller
947 * @message: the @spi_message
948 */
949static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
950{
951 struct spi_res *res, *tmp;
952
953 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
954 if (res->release)
955 res->release(ctlr, message, res->data);
956
957 list_del(&res->entry);
958
959 kfree(res);
960 }
961}
962
963/*-------------------------------------------------------------------------*/
964
965static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
966{
967 bool activate = enable;
968
969 /*
970 * Avoid calling into the driver (or doing delays) if the chip select
971 * isn't actually changing from the last time this was called.
972 */
973 if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
974 (!enable && spi->controller->last_cs != spi->chip_select)) &&
975 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
976 return;
977
978 trace_spi_set_cs(spi, activate);
979
980 spi->controller->last_cs = enable ? spi->chip_select : -1;
981 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
982
983 if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
984 spi_delay_exec(&spi->cs_hold, NULL);
985 }
986
987 if (spi->mode & SPI_CS_HIGH)
988 enable = !enable;
989
990 if (spi->cs_gpiod) {
991 if (!(spi->mode & SPI_NO_CS)) {
992 /*
993 * Historically ACPI has no means of the GPIO polarity and
994 * thus the SPISerialBus() resource defines it on the per-chip
995 * basis. In order to avoid a chain of negations, the GPIO
996 * polarity is considered being Active High. Even for the cases
997 * when _DSD() is involved (in the updated versions of ACPI)
998 * the GPIO CS polarity must be defined Active High to avoid
999 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
1000 * into account.
1001 */
1002 if (has_acpi_companion(&spi->dev))
1003 gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
1004 else
1005 /* Polarity handled by GPIO library */
1006 gpiod_set_value_cansleep(spi->cs_gpiod, activate);
1007 }
1008 /* Some SPI masters need both GPIO CS & slave_select */
1009 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
1010 spi->controller->set_cs)
1011 spi->controller->set_cs(spi, !enable);
1012 } else if (spi->controller->set_cs) {
1013 spi->controller->set_cs(spi, !enable);
1014 }
1015
1016 if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
1017 if (activate)
1018 spi_delay_exec(&spi->cs_setup, NULL);
1019 else
1020 spi_delay_exec(&spi->cs_inactive, NULL);
1021 }
1022}
1023
1024#ifdef CONFIG_HAS_DMA
1025static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1026 struct sg_table *sgt, void *buf, size_t len,
1027 enum dma_data_direction dir, unsigned long attrs)
1028{
1029 const bool vmalloced_buf = is_vmalloc_addr(buf);
1030 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1031#ifdef CONFIG_HIGHMEM
1032 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1033 (unsigned long)buf < (PKMAP_BASE +
1034 (LAST_PKMAP * PAGE_SIZE)));
1035#else
1036 const bool kmap_buf = false;
1037#endif
1038 int desc_len;
1039 int sgs;
1040 struct page *vm_page;
1041 struct scatterlist *sg;
1042 void *sg_buf;
1043 size_t min;
1044 int i, ret;
1045
1046 if (vmalloced_buf || kmap_buf) {
1047 desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1048 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1049 } else if (virt_addr_valid(buf)) {
1050 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1051 sgs = DIV_ROUND_UP(len, desc_len);
1052 } else {
1053 return -EINVAL;
1054 }
1055
1056 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1057 if (ret != 0)
1058 return ret;
1059
1060 sg = &sgt->sgl[0];
1061 for (i = 0; i < sgs; i++) {
1062
1063 if (vmalloced_buf || kmap_buf) {
1064 /*
1065 * Next scatterlist entry size is the minimum between
1066 * the desc_len and the remaining buffer length that
1067 * fits in a page.
1068 */
1069 min = min_t(size_t, desc_len,
1070 min_t(size_t, len,
1071 PAGE_SIZE - offset_in_page(buf)));
1072 if (vmalloced_buf)
1073 vm_page = vmalloc_to_page(buf);
1074 else
1075 vm_page = kmap_to_page(buf);
1076 if (!vm_page) {
1077 sg_free_table(sgt);
1078 return -ENOMEM;
1079 }
1080 sg_set_page(sg, vm_page,
1081 min, offset_in_page(buf));
1082 } else {
1083 min = min_t(size_t, len, desc_len);
1084 sg_buf = buf;
1085 sg_set_buf(sg, sg_buf, min);
1086 }
1087
1088 buf += min;
1089 len -= min;
1090 sg = sg_next(sg);
1091 }
1092
1093 ret = dma_map_sgtable(dev, sgt, dir, attrs);
1094 if (ret < 0) {
1095 sg_free_table(sgt);
1096 return ret;
1097 }
1098
1099 return 0;
1100}
1101
1102int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1103 struct sg_table *sgt, void *buf, size_t len,
1104 enum dma_data_direction dir)
1105{
1106 return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1107}
1108
1109static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1110 struct device *dev, struct sg_table *sgt,
1111 enum dma_data_direction dir,
1112 unsigned long attrs)
1113{
1114 if (sgt->orig_nents) {
1115 dma_unmap_sgtable(dev, sgt, dir, attrs);
1116 sg_free_table(sgt);
1117 sgt->orig_nents = 0;
1118 sgt->nents = 0;
1119 }
1120}
1121
1122void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1123 struct sg_table *sgt, enum dma_data_direction dir)
1124{
1125 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1126}
1127
1128static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1129{
1130 struct device *tx_dev, *rx_dev;
1131 struct spi_transfer *xfer;
1132 int ret;
1133
1134 if (!ctlr->can_dma)
1135 return 0;
1136
1137 if (ctlr->dma_tx)
1138 tx_dev = ctlr->dma_tx->device->dev;
1139 else if (ctlr->dma_map_dev)
1140 tx_dev = ctlr->dma_map_dev;
1141 else
1142 tx_dev = ctlr->dev.parent;
1143
1144 if (ctlr->dma_rx)
1145 rx_dev = ctlr->dma_rx->device->dev;
1146 else if (ctlr->dma_map_dev)
1147 rx_dev = ctlr->dma_map_dev;
1148 else
1149 rx_dev = ctlr->dev.parent;
1150
1151 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1152 /* The sync is done before each transfer. */
1153 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1154
1155 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1156 continue;
1157
1158 if (xfer->tx_buf != NULL) {
1159 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1160 (void *)xfer->tx_buf,
1161 xfer->len, DMA_TO_DEVICE,
1162 attrs);
1163 if (ret != 0)
1164 return ret;
1165 }
1166
1167 if (xfer->rx_buf != NULL) {
1168 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1169 xfer->rx_buf, xfer->len,
1170 DMA_FROM_DEVICE, attrs);
1171 if (ret != 0) {
1172 spi_unmap_buf_attrs(ctlr, tx_dev,
1173 &xfer->tx_sg, DMA_TO_DEVICE,
1174 attrs);
1175
1176 return ret;
1177 }
1178 }
1179 }
1180
1181 ctlr->cur_rx_dma_dev = rx_dev;
1182 ctlr->cur_tx_dma_dev = tx_dev;
1183 ctlr->cur_msg_mapped = true;
1184
1185 return 0;
1186}
1187
1188static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1189{
1190 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1191 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1192 struct spi_transfer *xfer;
1193
1194 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1195 return 0;
1196
1197 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1198 /* The sync has already been done after each transfer. */
1199 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1200
1201 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1202 continue;
1203
1204 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1205 DMA_FROM_DEVICE, attrs);
1206 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1207 DMA_TO_DEVICE, attrs);
1208 }
1209
1210 ctlr->cur_msg_mapped = false;
1211
1212 return 0;
1213}
1214
1215static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1216 struct spi_transfer *xfer)
1217{
1218 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1219 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1220
1221 if (!ctlr->cur_msg_mapped)
1222 return;
1223
1224 if (xfer->tx_sg.orig_nents)
1225 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1226 if (xfer->rx_sg.orig_nents)
1227 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1228}
1229
1230static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1231 struct spi_transfer *xfer)
1232{
1233 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1234 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1235
1236 if (!ctlr->cur_msg_mapped)
1237 return;
1238
1239 if (xfer->rx_sg.orig_nents)
1240 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1241 if (xfer->tx_sg.orig_nents)
1242 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1243}
1244#else /* !CONFIG_HAS_DMA */
1245static inline int __spi_map_msg(struct spi_controller *ctlr,
1246 struct spi_message *msg)
1247{
1248 return 0;
1249}
1250
1251static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1252 struct spi_message *msg)
1253{
1254 return 0;
1255}
1256
1257static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1258 struct spi_transfer *xfer)
1259{
1260}
1261
1262static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1263 struct spi_transfer *xfer)
1264{
1265}
1266#endif /* !CONFIG_HAS_DMA */
1267
1268static inline int spi_unmap_msg(struct spi_controller *ctlr,
1269 struct spi_message *msg)
1270{
1271 struct spi_transfer *xfer;
1272
1273 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1274 /*
1275 * Restore the original value of tx_buf or rx_buf if they are
1276 * NULL.
1277 */
1278 if (xfer->tx_buf == ctlr->dummy_tx)
1279 xfer->tx_buf = NULL;
1280 if (xfer->rx_buf == ctlr->dummy_rx)
1281 xfer->rx_buf = NULL;
1282 }
1283
1284 return __spi_unmap_msg(ctlr, msg);
1285}
1286
1287static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1288{
1289 struct spi_transfer *xfer;
1290 void *tmp;
1291 unsigned int max_tx, max_rx;
1292
1293 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1294 && !(msg->spi->mode & SPI_3WIRE)) {
1295 max_tx = 0;
1296 max_rx = 0;
1297
1298 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1299 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1300 !xfer->tx_buf)
1301 max_tx = max(xfer->len, max_tx);
1302 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1303 !xfer->rx_buf)
1304 max_rx = max(xfer->len, max_rx);
1305 }
1306
1307 if (max_tx) {
1308 tmp = krealloc(ctlr->dummy_tx, max_tx,
1309 GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1310 if (!tmp)
1311 return -ENOMEM;
1312 ctlr->dummy_tx = tmp;
1313 }
1314
1315 if (max_rx) {
1316 tmp = krealloc(ctlr->dummy_rx, max_rx,
1317 GFP_KERNEL | GFP_DMA);
1318 if (!tmp)
1319 return -ENOMEM;
1320 ctlr->dummy_rx = tmp;
1321 }
1322
1323 if (max_tx || max_rx) {
1324 list_for_each_entry(xfer, &msg->transfers,
1325 transfer_list) {
1326 if (!xfer->len)
1327 continue;
1328 if (!xfer->tx_buf)
1329 xfer->tx_buf = ctlr->dummy_tx;
1330 if (!xfer->rx_buf)
1331 xfer->rx_buf = ctlr->dummy_rx;
1332 }
1333 }
1334 }
1335
1336 return __spi_map_msg(ctlr, msg);
1337}
1338
1339static int spi_transfer_wait(struct spi_controller *ctlr,
1340 struct spi_message *msg,
1341 struct spi_transfer *xfer)
1342{
1343 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1344 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1345 u32 speed_hz = xfer->speed_hz;
1346 unsigned long long ms;
1347
1348 if (spi_controller_is_slave(ctlr)) {
1349 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1350 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1351 return -EINTR;
1352 }
1353 } else {
1354 if (!speed_hz)
1355 speed_hz = 100000;
1356
1357 /*
1358 * For each byte we wait for 8 cycles of the SPI clock.
1359 * Since speed is defined in Hz and we want milliseconds,
1360 * use respective multiplier, but before the division,
1361 * otherwise we may get 0 for short transfers.
1362 */
1363 ms = 8LL * MSEC_PER_SEC * xfer->len;
1364 do_div(ms, speed_hz);
1365
1366 /*
1367 * Increase it twice and add 200 ms tolerance, use
1368 * predefined maximum in case of overflow.
1369 */
1370 ms += ms + 200;
1371 if (ms > UINT_MAX)
1372 ms = UINT_MAX;
1373
1374 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1375 msecs_to_jiffies(ms));
1376
1377 if (ms == 0) {
1378 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1379 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1380 dev_err(&msg->spi->dev,
1381 "SPI transfer timed out\n");
1382 return -ETIMEDOUT;
1383 }
1384 }
1385
1386 return 0;
1387}
1388
1389static void _spi_transfer_delay_ns(u32 ns)
1390{
1391 if (!ns)
1392 return;
1393 if (ns <= NSEC_PER_USEC) {
1394 ndelay(ns);
1395 } else {
1396 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1397
1398 if (us <= 10)
1399 udelay(us);
1400 else
1401 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1402 }
1403}
1404
1405int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1406{
1407 u32 delay = _delay->value;
1408 u32 unit = _delay->unit;
1409 u32 hz;
1410
1411 if (!delay)
1412 return 0;
1413
1414 switch (unit) {
1415 case SPI_DELAY_UNIT_USECS:
1416 delay *= NSEC_PER_USEC;
1417 break;
1418 case SPI_DELAY_UNIT_NSECS:
1419 /* Nothing to do here */
1420 break;
1421 case SPI_DELAY_UNIT_SCK:
1422 /* Clock cycles need to be obtained from spi_transfer */
1423 if (!xfer)
1424 return -EINVAL;
1425 /*
1426 * If there is unknown effective speed, approximate it
1427 * by underestimating with half of the requested hz.
1428 */
1429 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1430 if (!hz)
1431 return -EINVAL;
1432
1433 /* Convert delay to nanoseconds */
1434 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1435 break;
1436 default:
1437 return -EINVAL;
1438 }
1439
1440 return delay;
1441}
1442EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1443
1444int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1445{
1446 int delay;
1447
1448 might_sleep();
1449
1450 if (!_delay)
1451 return -EINVAL;
1452
1453 delay = spi_delay_to_ns(_delay, xfer);
1454 if (delay < 0)
1455 return delay;
1456
1457 _spi_transfer_delay_ns(delay);
1458
1459 return 0;
1460}
1461EXPORT_SYMBOL_GPL(spi_delay_exec);
1462
1463static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1464 struct spi_transfer *xfer)
1465{
1466 u32 default_delay_ns = 10 * NSEC_PER_USEC;
1467 u32 delay = xfer->cs_change_delay.value;
1468 u32 unit = xfer->cs_change_delay.unit;
1469 int ret;
1470
1471 /* Return early on "fast" mode - for everything but USECS */
1472 if (!delay) {
1473 if (unit == SPI_DELAY_UNIT_USECS)
1474 _spi_transfer_delay_ns(default_delay_ns);
1475 return;
1476 }
1477
1478 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1479 if (ret) {
1480 dev_err_once(&msg->spi->dev,
1481 "Use of unsupported delay unit %i, using default of %luus\n",
1482 unit, default_delay_ns / NSEC_PER_USEC);
1483 _spi_transfer_delay_ns(default_delay_ns);
1484 }
1485}
1486
1487/*
1488 * spi_transfer_one_message - Default implementation of transfer_one_message()
1489 *
1490 * This is a standard implementation of transfer_one_message() for
1491 * drivers which implement a transfer_one() operation. It provides
1492 * standard handling of delays and chip select management.
1493 */
1494static int spi_transfer_one_message(struct spi_controller *ctlr,
1495 struct spi_message *msg)
1496{
1497 struct spi_transfer *xfer;
1498 bool keep_cs = false;
1499 int ret = 0;
1500 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1501 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1502
1503 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1504 spi_set_cs(msg->spi, !xfer->cs_off, false);
1505
1506 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1507 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1508
1509 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1510 trace_spi_transfer_start(msg, xfer);
1511
1512 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1513 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1514
1515 if (!ctlr->ptp_sts_supported) {
1516 xfer->ptp_sts_word_pre = 0;
1517 ptp_read_system_prets(xfer->ptp_sts);
1518 }
1519
1520 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1521 reinit_completion(&ctlr->xfer_completion);
1522
1523fallback_pio:
1524 spi_dma_sync_for_device(ctlr, xfer);
1525 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1526 if (ret < 0) {
1527 spi_dma_sync_for_cpu(ctlr, xfer);
1528
1529 if (ctlr->cur_msg_mapped &&
1530 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1531 __spi_unmap_msg(ctlr, msg);
1532 ctlr->fallback = true;
1533 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1534 goto fallback_pio;
1535 }
1536
1537 SPI_STATISTICS_INCREMENT_FIELD(statm,
1538 errors);
1539 SPI_STATISTICS_INCREMENT_FIELD(stats,
1540 errors);
1541 dev_err(&msg->spi->dev,
1542 "SPI transfer failed: %d\n", ret);
1543 goto out;
1544 }
1545
1546 if (ret > 0) {
1547 ret = spi_transfer_wait(ctlr, msg, xfer);
1548 if (ret < 0)
1549 msg->status = ret;
1550 }
1551
1552 spi_dma_sync_for_cpu(ctlr, xfer);
1553 } else {
1554 if (xfer->len)
1555 dev_err(&msg->spi->dev,
1556 "Bufferless transfer has length %u\n",
1557 xfer->len);
1558 }
1559
1560 if (!ctlr->ptp_sts_supported) {
1561 ptp_read_system_postts(xfer->ptp_sts);
1562 xfer->ptp_sts_word_post = xfer->len;
1563 }
1564
1565 trace_spi_transfer_stop(msg, xfer);
1566
1567 if (msg->status != -EINPROGRESS)
1568 goto out;
1569
1570 spi_transfer_delay_exec(xfer);
1571
1572 if (xfer->cs_change) {
1573 if (list_is_last(&xfer->transfer_list,
1574 &msg->transfers)) {
1575 keep_cs = true;
1576 } else {
1577 if (!xfer->cs_off)
1578 spi_set_cs(msg->spi, false, false);
1579 _spi_transfer_cs_change_delay(msg, xfer);
1580 if (!list_next_entry(xfer, transfer_list)->cs_off)
1581 spi_set_cs(msg->spi, true, false);
1582 }
1583 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1584 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1585 spi_set_cs(msg->spi, xfer->cs_off, false);
1586 }
1587
1588 msg->actual_length += xfer->len;
1589 }
1590
1591out:
1592 if (ret != 0 || !keep_cs)
1593 spi_set_cs(msg->spi, false, false);
1594
1595 if (msg->status == -EINPROGRESS)
1596 msg->status = ret;
1597
1598 if (msg->status && ctlr->handle_err)
1599 ctlr->handle_err(ctlr, msg);
1600
1601 spi_finalize_current_message(ctlr);
1602
1603 return ret;
1604}
1605
1606/**
1607 * spi_finalize_current_transfer - report completion of a transfer
1608 * @ctlr: the controller reporting completion
1609 *
1610 * Called by SPI drivers using the core transfer_one_message()
1611 * implementation to notify it that the current interrupt driven
1612 * transfer has finished and the next one may be scheduled.
1613 */
1614void spi_finalize_current_transfer(struct spi_controller *ctlr)
1615{
1616 complete(&ctlr->xfer_completion);
1617}
1618EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1619
1620static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1621{
1622 if (ctlr->auto_runtime_pm) {
1623 pm_runtime_mark_last_busy(ctlr->dev.parent);
1624 pm_runtime_put_autosuspend(ctlr->dev.parent);
1625 }
1626}
1627
1628static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1629 struct spi_message *msg, bool was_busy)
1630{
1631 struct spi_transfer *xfer;
1632 int ret;
1633
1634 if (!was_busy && ctlr->auto_runtime_pm) {
1635 ret = pm_runtime_get_sync(ctlr->dev.parent);
1636 if (ret < 0) {
1637 pm_runtime_put_noidle(ctlr->dev.parent);
1638 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1639 ret);
1640 return ret;
1641 }
1642 }
1643
1644 if (!was_busy)
1645 trace_spi_controller_busy(ctlr);
1646
1647 if (!was_busy && ctlr->prepare_transfer_hardware) {
1648 ret = ctlr->prepare_transfer_hardware(ctlr);
1649 if (ret) {
1650 dev_err(&ctlr->dev,
1651 "failed to prepare transfer hardware: %d\n",
1652 ret);
1653
1654 if (ctlr->auto_runtime_pm)
1655 pm_runtime_put(ctlr->dev.parent);
1656
1657 msg->status = ret;
1658 spi_finalize_current_message(ctlr);
1659
1660 return ret;
1661 }
1662 }
1663
1664 trace_spi_message_start(msg);
1665
1666 ret = spi_split_transfers_maxsize(ctlr, msg,
1667 spi_max_transfer_size(msg->spi),
1668 GFP_KERNEL | GFP_DMA);
1669 if (ret) {
1670 msg->status = ret;
1671 spi_finalize_current_message(ctlr);
1672 return ret;
1673 }
1674
1675 if (ctlr->prepare_message) {
1676 ret = ctlr->prepare_message(ctlr, msg);
1677 if (ret) {
1678 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1679 ret);
1680 msg->status = ret;
1681 spi_finalize_current_message(ctlr);
1682 return ret;
1683 }
1684 msg->prepared = true;
1685 }
1686
1687 ret = spi_map_msg(ctlr, msg);
1688 if (ret) {
1689 msg->status = ret;
1690 spi_finalize_current_message(ctlr);
1691 return ret;
1692 }
1693
1694 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1695 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1696 xfer->ptp_sts_word_pre = 0;
1697 ptp_read_system_prets(xfer->ptp_sts);
1698 }
1699 }
1700
1701 /*
1702 * Drivers implementation of transfer_one_message() must arrange for
1703 * spi_finalize_current_message() to get called. Most drivers will do
1704 * this in the calling context, but some don't. For those cases, a
1705 * completion is used to guarantee that this function does not return
1706 * until spi_finalize_current_message() is done accessing
1707 * ctlr->cur_msg.
1708 * Use of the following two flags enable to opportunistically skip the
1709 * use of the completion since its use involves expensive spin locks.
1710 * In case of a race with the context that calls
1711 * spi_finalize_current_message() the completion will always be used,
1712 * due to strict ordering of these flags using barriers.
1713 */
1714 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1715 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1716 reinit_completion(&ctlr->cur_msg_completion);
1717 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1718
1719 ret = ctlr->transfer_one_message(ctlr, msg);
1720 if (ret) {
1721 dev_err(&ctlr->dev,
1722 "failed to transfer one message from queue\n");
1723 return ret;
1724 }
1725
1726 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1727 smp_mb(); /* See spi_finalize_current_message()... */
1728 if (READ_ONCE(ctlr->cur_msg_incomplete))
1729 wait_for_completion(&ctlr->cur_msg_completion);
1730
1731 return 0;
1732}
1733
1734/**
1735 * __spi_pump_messages - function which processes spi message queue
1736 * @ctlr: controller to process queue for
1737 * @in_kthread: true if we are in the context of the message pump thread
1738 *
1739 * This function checks if there is any spi message in the queue that
1740 * needs processing and if so call out to the driver to initialize hardware
1741 * and transfer each message.
1742 *
1743 * Note that it is called both from the kthread itself and also from
1744 * inside spi_sync(); the queue extraction handling at the top of the
1745 * function should deal with this safely.
1746 */
1747static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1748{
1749 struct spi_message *msg;
1750 bool was_busy = false;
1751 unsigned long flags;
1752 int ret;
1753
1754 /* Take the IO mutex */
1755 mutex_lock(&ctlr->io_mutex);
1756
1757 /* Lock queue */
1758 spin_lock_irqsave(&ctlr->queue_lock, flags);
1759
1760 /* Make sure we are not already running a message */
1761 if (ctlr->cur_msg)
1762 goto out_unlock;
1763
1764 /* Check if the queue is idle */
1765 if (list_empty(&ctlr->queue) || !ctlr->running) {
1766 if (!ctlr->busy)
1767 goto out_unlock;
1768
1769 /* Defer any non-atomic teardown to the thread */
1770 if (!in_kthread) {
1771 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1772 !ctlr->unprepare_transfer_hardware) {
1773 spi_idle_runtime_pm(ctlr);
1774 ctlr->busy = false;
1775 ctlr->queue_empty = true;
1776 trace_spi_controller_idle(ctlr);
1777 } else {
1778 kthread_queue_work(ctlr->kworker,
1779 &ctlr->pump_messages);
1780 }
1781 goto out_unlock;
1782 }
1783
1784 ctlr->busy = false;
1785 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1786
1787 kfree(ctlr->dummy_rx);
1788 ctlr->dummy_rx = NULL;
1789 kfree(ctlr->dummy_tx);
1790 ctlr->dummy_tx = NULL;
1791 if (ctlr->unprepare_transfer_hardware &&
1792 ctlr->unprepare_transfer_hardware(ctlr))
1793 dev_err(&ctlr->dev,
1794 "failed to unprepare transfer hardware\n");
1795 spi_idle_runtime_pm(ctlr);
1796 trace_spi_controller_idle(ctlr);
1797
1798 spin_lock_irqsave(&ctlr->queue_lock, flags);
1799 ctlr->queue_empty = true;
1800 goto out_unlock;
1801 }
1802
1803 /* Extract head of queue */
1804 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1805 ctlr->cur_msg = msg;
1806
1807 list_del_init(&msg->queue);
1808 if (ctlr->busy)
1809 was_busy = true;
1810 else
1811 ctlr->busy = true;
1812 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1813
1814 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1815 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1816
1817 ctlr->cur_msg = NULL;
1818 ctlr->fallback = false;
1819
1820 mutex_unlock(&ctlr->io_mutex);
1821
1822 /* Prod the scheduler in case transfer_one() was busy waiting */
1823 if (!ret)
1824 cond_resched();
1825 return;
1826
1827out_unlock:
1828 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1829 mutex_unlock(&ctlr->io_mutex);
1830}
1831
1832/**
1833 * spi_pump_messages - kthread work function which processes spi message queue
1834 * @work: pointer to kthread work struct contained in the controller struct
1835 */
1836static void spi_pump_messages(struct kthread_work *work)
1837{
1838 struct spi_controller *ctlr =
1839 container_of(work, struct spi_controller, pump_messages);
1840
1841 __spi_pump_messages(ctlr, true);
1842}
1843
1844/**
1845 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1846 * @ctlr: Pointer to the spi_controller structure of the driver
1847 * @xfer: Pointer to the transfer being timestamped
1848 * @progress: How many words (not bytes) have been transferred so far
1849 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1850 * transfer, for less jitter in time measurement. Only compatible
1851 * with PIO drivers. If true, must follow up with
1852 * spi_take_timestamp_post or otherwise system will crash.
1853 * WARNING: for fully predictable results, the CPU frequency must
1854 * also be under control (governor).
1855 *
1856 * This is a helper for drivers to collect the beginning of the TX timestamp
1857 * for the requested byte from the SPI transfer. The frequency with which this
1858 * function must be called (once per word, once for the whole transfer, once
1859 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1860 * greater than or equal to the requested byte at the time of the call. The
1861 * timestamp is only taken once, at the first such call. It is assumed that
1862 * the driver advances its @tx buffer pointer monotonically.
1863 */
1864void spi_take_timestamp_pre(struct spi_controller *ctlr,
1865 struct spi_transfer *xfer,
1866 size_t progress, bool irqs_off)
1867{
1868 if (!xfer->ptp_sts)
1869 return;
1870
1871 if (xfer->timestamped)
1872 return;
1873
1874 if (progress > xfer->ptp_sts_word_pre)
1875 return;
1876
1877 /* Capture the resolution of the timestamp */
1878 xfer->ptp_sts_word_pre = progress;
1879
1880 if (irqs_off) {
1881 local_irq_save(ctlr->irq_flags);
1882 preempt_disable();
1883 }
1884
1885 ptp_read_system_prets(xfer->ptp_sts);
1886}
1887EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1888
1889/**
1890 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1891 * @ctlr: Pointer to the spi_controller structure of the driver
1892 * @xfer: Pointer to the transfer being timestamped
1893 * @progress: How many words (not bytes) have been transferred so far
1894 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1895 *
1896 * This is a helper for drivers to collect the end of the TX timestamp for
1897 * the requested byte from the SPI transfer. Can be called with an arbitrary
1898 * frequency: only the first call where @tx exceeds or is equal to the
1899 * requested word will be timestamped.
1900 */
1901void spi_take_timestamp_post(struct spi_controller *ctlr,
1902 struct spi_transfer *xfer,
1903 size_t progress, bool irqs_off)
1904{
1905 if (!xfer->ptp_sts)
1906 return;
1907
1908 if (xfer->timestamped)
1909 return;
1910
1911 if (progress < xfer->ptp_sts_word_post)
1912 return;
1913
1914 ptp_read_system_postts(xfer->ptp_sts);
1915
1916 if (irqs_off) {
1917 local_irq_restore(ctlr->irq_flags);
1918 preempt_enable();
1919 }
1920
1921 /* Capture the resolution of the timestamp */
1922 xfer->ptp_sts_word_post = progress;
1923
1924 xfer->timestamped = true;
1925}
1926EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
1927
1928/**
1929 * spi_set_thread_rt - set the controller to pump at realtime priority
1930 * @ctlr: controller to boost priority of
1931 *
1932 * This can be called because the controller requested realtime priority
1933 * (by setting the ->rt value before calling spi_register_controller()) or
1934 * because a device on the bus said that its transfers needed realtime
1935 * priority.
1936 *
1937 * NOTE: at the moment if any device on a bus says it needs realtime then
1938 * the thread will be at realtime priority for all transfers on that
1939 * controller. If this eventually becomes a problem we may see if we can
1940 * find a way to boost the priority only temporarily during relevant
1941 * transfers.
1942 */
1943static void spi_set_thread_rt(struct spi_controller *ctlr)
1944{
1945 dev_info(&ctlr->dev,
1946 "will run message pump with realtime priority\n");
1947 sched_set_fifo(ctlr->kworker->task);
1948}
1949
1950static int spi_init_queue(struct spi_controller *ctlr)
1951{
1952 ctlr->running = false;
1953 ctlr->busy = false;
1954 ctlr->queue_empty = true;
1955
1956 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1957 if (IS_ERR(ctlr->kworker)) {
1958 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1959 return PTR_ERR(ctlr->kworker);
1960 }
1961
1962 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1963
1964 /*
1965 * Controller config will indicate if this controller should run the
1966 * message pump with high (realtime) priority to reduce the transfer
1967 * latency on the bus by minimising the delay between a transfer
1968 * request and the scheduling of the message pump thread. Without this
1969 * setting the message pump thread will remain at default priority.
1970 */
1971 if (ctlr->rt)
1972 spi_set_thread_rt(ctlr);
1973
1974 return 0;
1975}
1976
1977/**
1978 * spi_get_next_queued_message() - called by driver to check for queued
1979 * messages
1980 * @ctlr: the controller to check for queued messages
1981 *
1982 * If there are more messages in the queue, the next message is returned from
1983 * this call.
1984 *
1985 * Return: the next message in the queue, else NULL if the queue is empty.
1986 */
1987struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1988{
1989 struct spi_message *next;
1990 unsigned long flags;
1991
1992 /* Get a pointer to the next message, if any */
1993 spin_lock_irqsave(&ctlr->queue_lock, flags);
1994 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1995 queue);
1996 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1997
1998 return next;
1999}
2000EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2001
2002/**
2003 * spi_finalize_current_message() - the current message is complete
2004 * @ctlr: the controller to return the message to
2005 *
2006 * Called by the driver to notify the core that the message in the front of the
2007 * queue is complete and can be removed from the queue.
2008 */
2009void spi_finalize_current_message(struct spi_controller *ctlr)
2010{
2011 struct spi_transfer *xfer;
2012 struct spi_message *mesg;
2013 int ret;
2014
2015 mesg = ctlr->cur_msg;
2016
2017 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2018 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2019 ptp_read_system_postts(xfer->ptp_sts);
2020 xfer->ptp_sts_word_post = xfer->len;
2021 }
2022 }
2023
2024 if (unlikely(ctlr->ptp_sts_supported))
2025 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2026 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2027
2028 spi_unmap_msg(ctlr, mesg);
2029
2030 /*
2031 * In the prepare_messages callback the SPI bus has the opportunity
2032 * to split a transfer to smaller chunks.
2033 *
2034 * Release the split transfers here since spi_map_msg() is done on
2035 * the split transfers.
2036 */
2037 spi_res_release(ctlr, mesg);
2038
2039 if (mesg->prepared && ctlr->unprepare_message) {
2040 ret = ctlr->unprepare_message(ctlr, mesg);
2041 if (ret) {
2042 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2043 ret);
2044 }
2045 }
2046
2047 mesg->prepared = false;
2048
2049 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2050 smp_mb(); /* See __spi_pump_transfer_message()... */
2051 if (READ_ONCE(ctlr->cur_msg_need_completion))
2052 complete(&ctlr->cur_msg_completion);
2053
2054 trace_spi_message_done(mesg);
2055
2056 mesg->state = NULL;
2057 if (mesg->complete)
2058 mesg->complete(mesg->context);
2059}
2060EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2061
2062static int spi_start_queue(struct spi_controller *ctlr)
2063{
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&ctlr->queue_lock, flags);
2067
2068 if (ctlr->running || ctlr->busy) {
2069 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2070 return -EBUSY;
2071 }
2072
2073 ctlr->running = true;
2074 ctlr->cur_msg = NULL;
2075 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2076
2077 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2078
2079 return 0;
2080}
2081
2082static int spi_stop_queue(struct spi_controller *ctlr)
2083{
2084 unsigned long flags;
2085 unsigned limit = 500;
2086 int ret = 0;
2087
2088 spin_lock_irqsave(&ctlr->queue_lock, flags);
2089
2090 /*
2091 * This is a bit lame, but is optimized for the common execution path.
2092 * A wait_queue on the ctlr->busy could be used, but then the common
2093 * execution path (pump_messages) would be required to call wake_up or
2094 * friends on every SPI message. Do this instead.
2095 */
2096 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2097 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2098 usleep_range(10000, 11000);
2099 spin_lock_irqsave(&ctlr->queue_lock, flags);
2100 }
2101
2102 if (!list_empty(&ctlr->queue) || ctlr->busy)
2103 ret = -EBUSY;
2104 else
2105 ctlr->running = false;
2106
2107 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2108
2109 if (ret) {
2110 dev_warn(&ctlr->dev, "could not stop message queue\n");
2111 return ret;
2112 }
2113 return ret;
2114}
2115
2116static int spi_destroy_queue(struct spi_controller *ctlr)
2117{
2118 int ret;
2119
2120 ret = spi_stop_queue(ctlr);
2121
2122 /*
2123 * kthread_flush_worker will block until all work is done.
2124 * If the reason that stop_queue timed out is that the work will never
2125 * finish, then it does no good to call flush/stop thread, so
2126 * return anyway.
2127 */
2128 if (ret) {
2129 dev_err(&ctlr->dev, "problem destroying queue\n");
2130 return ret;
2131 }
2132
2133 kthread_destroy_worker(ctlr->kworker);
2134
2135 return 0;
2136}
2137
2138static int __spi_queued_transfer(struct spi_device *spi,
2139 struct spi_message *msg,
2140 bool need_pump)
2141{
2142 struct spi_controller *ctlr = spi->controller;
2143 unsigned long flags;
2144
2145 spin_lock_irqsave(&ctlr->queue_lock, flags);
2146
2147 if (!ctlr->running) {
2148 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2149 return -ESHUTDOWN;
2150 }
2151 msg->actual_length = 0;
2152 msg->status = -EINPROGRESS;
2153
2154 list_add_tail(&msg->queue, &ctlr->queue);
2155 ctlr->queue_empty = false;
2156 if (!ctlr->busy && need_pump)
2157 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2158
2159 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2160 return 0;
2161}
2162
2163/**
2164 * spi_queued_transfer - transfer function for queued transfers
2165 * @spi: spi device which is requesting transfer
2166 * @msg: spi message which is to handled is queued to driver queue
2167 *
2168 * Return: zero on success, else a negative error code.
2169 */
2170static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2171{
2172 return __spi_queued_transfer(spi, msg, true);
2173}
2174
2175static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2176{
2177 int ret;
2178
2179 ctlr->transfer = spi_queued_transfer;
2180 if (!ctlr->transfer_one_message)
2181 ctlr->transfer_one_message = spi_transfer_one_message;
2182
2183 /* Initialize and start queue */
2184 ret = spi_init_queue(ctlr);
2185 if (ret) {
2186 dev_err(&ctlr->dev, "problem initializing queue\n");
2187 goto err_init_queue;
2188 }
2189 ctlr->queued = true;
2190 ret = spi_start_queue(ctlr);
2191 if (ret) {
2192 dev_err(&ctlr->dev, "problem starting queue\n");
2193 goto err_start_queue;
2194 }
2195
2196 return 0;
2197
2198err_start_queue:
2199 spi_destroy_queue(ctlr);
2200err_init_queue:
2201 return ret;
2202}
2203
2204/**
2205 * spi_flush_queue - Send all pending messages in the queue from the callers'
2206 * context
2207 * @ctlr: controller to process queue for
2208 *
2209 * This should be used when one wants to ensure all pending messages have been
2210 * sent before doing something. Is used by the spi-mem code to make sure SPI
2211 * memory operations do not preempt regular SPI transfers that have been queued
2212 * before the spi-mem operation.
2213 */
2214void spi_flush_queue(struct spi_controller *ctlr)
2215{
2216 if (ctlr->transfer == spi_queued_transfer)
2217 __spi_pump_messages(ctlr, false);
2218}
2219
2220/*-------------------------------------------------------------------------*/
2221
2222#if defined(CONFIG_OF)
2223static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2224 struct device_node *nc)
2225{
2226 u32 value;
2227 u16 cs_setup;
2228 int rc;
2229
2230 /* Mode (clock phase/polarity/etc.) */
2231 if (of_property_read_bool(nc, "spi-cpha"))
2232 spi->mode |= SPI_CPHA;
2233 if (of_property_read_bool(nc, "spi-cpol"))
2234 spi->mode |= SPI_CPOL;
2235 if (of_property_read_bool(nc, "spi-3wire"))
2236 spi->mode |= SPI_3WIRE;
2237 if (of_property_read_bool(nc, "spi-lsb-first"))
2238 spi->mode |= SPI_LSB_FIRST;
2239 if (of_property_read_bool(nc, "spi-cs-high"))
2240 spi->mode |= SPI_CS_HIGH;
2241
2242 /* Device DUAL/QUAD mode */
2243 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2244 switch (value) {
2245 case 0:
2246 spi->mode |= SPI_NO_TX;
2247 break;
2248 case 1:
2249 break;
2250 case 2:
2251 spi->mode |= SPI_TX_DUAL;
2252 break;
2253 case 4:
2254 spi->mode |= SPI_TX_QUAD;
2255 break;
2256 case 8:
2257 spi->mode |= SPI_TX_OCTAL;
2258 break;
2259 default:
2260 dev_warn(&ctlr->dev,
2261 "spi-tx-bus-width %d not supported\n",
2262 value);
2263 break;
2264 }
2265 }
2266
2267 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2268 switch (value) {
2269 case 0:
2270 spi->mode |= SPI_NO_RX;
2271 break;
2272 case 1:
2273 break;
2274 case 2:
2275 spi->mode |= SPI_RX_DUAL;
2276 break;
2277 case 4:
2278 spi->mode |= SPI_RX_QUAD;
2279 break;
2280 case 8:
2281 spi->mode |= SPI_RX_OCTAL;
2282 break;
2283 default:
2284 dev_warn(&ctlr->dev,
2285 "spi-rx-bus-width %d not supported\n",
2286 value);
2287 break;
2288 }
2289 }
2290
2291 if (spi_controller_is_slave(ctlr)) {
2292 if (!of_node_name_eq(nc, "slave")) {
2293 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2294 nc);
2295 return -EINVAL;
2296 }
2297 return 0;
2298 }
2299
2300 /* Device address */
2301 rc = of_property_read_u32(nc, "reg", &value);
2302 if (rc) {
2303 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2304 nc, rc);
2305 return rc;
2306 }
2307 spi->chip_select = value;
2308
2309 /* Device speed */
2310 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2311 spi->max_speed_hz = value;
2312
2313 if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
2314 spi->cs_setup.value = cs_setup;
2315 spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
2316 }
2317
2318 return 0;
2319}
2320
2321static struct spi_device *
2322of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2323{
2324 struct spi_device *spi;
2325 int rc;
2326
2327 /* Alloc an spi_device */
2328 spi = spi_alloc_device(ctlr);
2329 if (!spi) {
2330 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2331 rc = -ENOMEM;
2332 goto err_out;
2333 }
2334
2335 /* Select device driver */
2336 rc = of_modalias_node(nc, spi->modalias,
2337 sizeof(spi->modalias));
2338 if (rc < 0) {
2339 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2340 goto err_out;
2341 }
2342
2343 rc = of_spi_parse_dt(ctlr, spi, nc);
2344 if (rc)
2345 goto err_out;
2346
2347 /* Store a pointer to the node in the device structure */
2348 of_node_get(nc);
2349 spi->dev.of_node = nc;
2350 spi->dev.fwnode = of_fwnode_handle(nc);
2351
2352 /* Register the new device */
2353 rc = spi_add_device(spi);
2354 if (rc) {
2355 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2356 goto err_of_node_put;
2357 }
2358
2359 return spi;
2360
2361err_of_node_put:
2362 of_node_put(nc);
2363err_out:
2364 spi_dev_put(spi);
2365 return ERR_PTR(rc);
2366}
2367
2368/**
2369 * of_register_spi_devices() - Register child devices onto the SPI bus
2370 * @ctlr: Pointer to spi_controller device
2371 *
2372 * Registers an spi_device for each child node of controller node which
2373 * represents a valid SPI slave.
2374 */
2375static void of_register_spi_devices(struct spi_controller *ctlr)
2376{
2377 struct spi_device *spi;
2378 struct device_node *nc;
2379
2380 if (!ctlr->dev.of_node)
2381 return;
2382
2383 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2384 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2385 continue;
2386 spi = of_register_spi_device(ctlr, nc);
2387 if (IS_ERR(spi)) {
2388 dev_warn(&ctlr->dev,
2389 "Failed to create SPI device for %pOF\n", nc);
2390 of_node_clear_flag(nc, OF_POPULATED);
2391 }
2392 }
2393}
2394#else
2395static void of_register_spi_devices(struct spi_controller *ctlr) { }
2396#endif
2397
2398/**
2399 * spi_new_ancillary_device() - Register ancillary SPI device
2400 * @spi: Pointer to the main SPI device registering the ancillary device
2401 * @chip_select: Chip Select of the ancillary device
2402 *
2403 * Register an ancillary SPI device; for example some chips have a chip-select
2404 * for normal device usage and another one for setup/firmware upload.
2405 *
2406 * This may only be called from main SPI device's probe routine.
2407 *
2408 * Return: 0 on success; negative errno on failure
2409 */
2410struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2411 u8 chip_select)
2412{
2413 struct spi_device *ancillary;
2414 int rc = 0;
2415
2416 /* Alloc an spi_device */
2417 ancillary = spi_alloc_device(spi->controller);
2418 if (!ancillary) {
2419 rc = -ENOMEM;
2420 goto err_out;
2421 }
2422
2423 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2424
2425 /* Use provided chip-select for ancillary device */
2426 ancillary->chip_select = chip_select;
2427
2428 /* Take over SPI mode/speed from SPI main device */
2429 ancillary->max_speed_hz = spi->max_speed_hz;
2430 ancillary->mode = spi->mode;
2431
2432 /* Register the new device */
2433 rc = spi_add_device_locked(ancillary);
2434 if (rc) {
2435 dev_err(&spi->dev, "failed to register ancillary device\n");
2436 goto err_out;
2437 }
2438
2439 return ancillary;
2440
2441err_out:
2442 spi_dev_put(ancillary);
2443 return ERR_PTR(rc);
2444}
2445EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2446
2447#ifdef CONFIG_ACPI
2448struct acpi_spi_lookup {
2449 struct spi_controller *ctlr;
2450 u32 max_speed_hz;
2451 u32 mode;
2452 int irq;
2453 u8 bits_per_word;
2454 u8 chip_select;
2455 int n;
2456 int index;
2457};
2458
2459static int acpi_spi_count(struct acpi_resource *ares, void *data)
2460{
2461 struct acpi_resource_spi_serialbus *sb;
2462 int *count = data;
2463
2464 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2465 return 1;
2466
2467 sb = &ares->data.spi_serial_bus;
2468 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2469 return 1;
2470
2471 *count = *count + 1;
2472
2473 return 1;
2474}
2475
2476/**
2477 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2478 * @adev: ACPI device
2479 *
2480 * Returns the number of SpiSerialBus resources in the ACPI-device's
2481 * resource-list; or a negative error code.
2482 */
2483int acpi_spi_count_resources(struct acpi_device *adev)
2484{
2485 LIST_HEAD(r);
2486 int count = 0;
2487 int ret;
2488
2489 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2490 if (ret < 0)
2491 return ret;
2492
2493 acpi_dev_free_resource_list(&r);
2494
2495 return count;
2496}
2497EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2498
2499static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2500 struct acpi_spi_lookup *lookup)
2501{
2502 const union acpi_object *obj;
2503
2504 if (!x86_apple_machine)
2505 return;
2506
2507 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2508 && obj->buffer.length >= 4)
2509 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2510
2511 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2512 && obj->buffer.length == 8)
2513 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2514
2515 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2516 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2517 lookup->mode |= SPI_LSB_FIRST;
2518
2519 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2520 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2521 lookup->mode |= SPI_CPOL;
2522
2523 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2524 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2525 lookup->mode |= SPI_CPHA;
2526}
2527
2528static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2529
2530static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2531{
2532 struct acpi_spi_lookup *lookup = data;
2533 struct spi_controller *ctlr = lookup->ctlr;
2534
2535 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2536 struct acpi_resource_spi_serialbus *sb;
2537 acpi_handle parent_handle;
2538 acpi_status status;
2539
2540 sb = &ares->data.spi_serial_bus;
2541 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2542
2543 if (lookup->index != -1 && lookup->n++ != lookup->index)
2544 return 1;
2545
2546 status = acpi_get_handle(NULL,
2547 sb->resource_source.string_ptr,
2548 &parent_handle);
2549
2550 if (ACPI_FAILURE(status))
2551 return -ENODEV;
2552
2553 if (ctlr) {
2554 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2555 return -ENODEV;
2556 } else {
2557 struct acpi_device *adev;
2558
2559 adev = acpi_fetch_acpi_dev(parent_handle);
2560 if (!adev)
2561 return -ENODEV;
2562
2563 ctlr = acpi_spi_find_controller_by_adev(adev);
2564 if (!ctlr)
2565 return -EPROBE_DEFER;
2566
2567 lookup->ctlr = ctlr;
2568 }
2569
2570 /*
2571 * ACPI DeviceSelection numbering is handled by the
2572 * host controller driver in Windows and can vary
2573 * from driver to driver. In Linux we always expect
2574 * 0 .. max - 1 so we need to ask the driver to
2575 * translate between the two schemes.
2576 */
2577 if (ctlr->fw_translate_cs) {
2578 int cs = ctlr->fw_translate_cs(ctlr,
2579 sb->device_selection);
2580 if (cs < 0)
2581 return cs;
2582 lookup->chip_select = cs;
2583 } else {
2584 lookup->chip_select = sb->device_selection;
2585 }
2586
2587 lookup->max_speed_hz = sb->connection_speed;
2588 lookup->bits_per_word = sb->data_bit_length;
2589
2590 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2591 lookup->mode |= SPI_CPHA;
2592 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2593 lookup->mode |= SPI_CPOL;
2594 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2595 lookup->mode |= SPI_CS_HIGH;
2596 }
2597 } else if (lookup->irq < 0) {
2598 struct resource r;
2599
2600 if (acpi_dev_resource_interrupt(ares, 0, &r))
2601 lookup->irq = r.start;
2602 }
2603
2604 /* Always tell the ACPI core to skip this resource */
2605 return 1;
2606}
2607
2608/**
2609 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2610 * @ctlr: controller to which the spi device belongs
2611 * @adev: ACPI Device for the spi device
2612 * @index: Index of the spi resource inside the ACPI Node
2613 *
2614 * This should be used to allocate a new spi device from and ACPI Node.
2615 * The caller is responsible for calling spi_add_device to register the spi device.
2616 *
2617 * If ctlr is set to NULL, the Controller for the spi device will be looked up
2618 * using the resource.
2619 * If index is set to -1, index is not used.
2620 * Note: If index is -1, ctlr must be set.
2621 *
2622 * Return: a pointer to the new device, or ERR_PTR on error.
2623 */
2624struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2625 struct acpi_device *adev,
2626 int index)
2627{
2628 acpi_handle parent_handle = NULL;
2629 struct list_head resource_list;
2630 struct acpi_spi_lookup lookup = {};
2631 struct spi_device *spi;
2632 int ret;
2633
2634 if (!ctlr && index == -1)
2635 return ERR_PTR(-EINVAL);
2636
2637 lookup.ctlr = ctlr;
2638 lookup.irq = -1;
2639 lookup.index = index;
2640 lookup.n = 0;
2641
2642 INIT_LIST_HEAD(&resource_list);
2643 ret = acpi_dev_get_resources(adev, &resource_list,
2644 acpi_spi_add_resource, &lookup);
2645 acpi_dev_free_resource_list(&resource_list);
2646
2647 if (ret < 0)
2648 /* Found SPI in _CRS but it points to another controller */
2649 return ERR_PTR(ret);
2650
2651 if (!lookup.max_speed_hz &&
2652 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2653 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2654 /* Apple does not use _CRS but nested devices for SPI slaves */
2655 acpi_spi_parse_apple_properties(adev, &lookup);
2656 }
2657
2658 if (!lookup.max_speed_hz)
2659 return ERR_PTR(-ENODEV);
2660
2661 spi = spi_alloc_device(lookup.ctlr);
2662 if (!spi) {
2663 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2664 dev_name(&adev->dev));
2665 return ERR_PTR(-ENOMEM);
2666 }
2667
2668 ACPI_COMPANION_SET(&spi->dev, adev);
2669 spi->max_speed_hz = lookup.max_speed_hz;
2670 spi->mode |= lookup.mode;
2671 spi->irq = lookup.irq;
2672 spi->bits_per_word = lookup.bits_per_word;
2673 spi->chip_select = lookup.chip_select;
2674
2675 return spi;
2676}
2677EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2678
2679static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2680 struct acpi_device *adev)
2681{
2682 struct spi_device *spi;
2683
2684 if (acpi_bus_get_status(adev) || !adev->status.present ||
2685 acpi_device_enumerated(adev))
2686 return AE_OK;
2687
2688 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2689 if (IS_ERR(spi)) {
2690 if (PTR_ERR(spi) == -ENOMEM)
2691 return AE_NO_MEMORY;
2692 else
2693 return AE_OK;
2694 }
2695
2696 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2697 sizeof(spi->modalias));
2698
2699 if (spi->irq < 0)
2700 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2701
2702 acpi_device_set_enumerated(adev);
2703
2704 adev->power.flags.ignore_parent = true;
2705 if (spi_add_device(spi)) {
2706 adev->power.flags.ignore_parent = false;
2707 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2708 dev_name(&adev->dev));
2709 spi_dev_put(spi);
2710 }
2711
2712 return AE_OK;
2713}
2714
2715static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2716 void *data, void **return_value)
2717{
2718 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2719 struct spi_controller *ctlr = data;
2720
2721 if (!adev)
2722 return AE_OK;
2723
2724 return acpi_register_spi_device(ctlr, adev);
2725}
2726
2727#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2728
2729static void acpi_register_spi_devices(struct spi_controller *ctlr)
2730{
2731 acpi_status status;
2732 acpi_handle handle;
2733
2734 handle = ACPI_HANDLE(ctlr->dev.parent);
2735 if (!handle)
2736 return;
2737
2738 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2739 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2740 acpi_spi_add_device, NULL, ctlr, NULL);
2741 if (ACPI_FAILURE(status))
2742 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2743}
2744#else
2745static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2746#endif /* CONFIG_ACPI */
2747
2748static void spi_controller_release(struct device *dev)
2749{
2750 struct spi_controller *ctlr;
2751
2752 ctlr = container_of(dev, struct spi_controller, dev);
2753 kfree(ctlr);
2754}
2755
2756static struct class spi_master_class = {
2757 .name = "spi_master",
2758 .owner = THIS_MODULE,
2759 .dev_release = spi_controller_release,
2760 .dev_groups = spi_master_groups,
2761};
2762
2763#ifdef CONFIG_SPI_SLAVE
2764/**
2765 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2766 * controller
2767 * @spi: device used for the current transfer
2768 */
2769int spi_slave_abort(struct spi_device *spi)
2770{
2771 struct spi_controller *ctlr = spi->controller;
2772
2773 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2774 return ctlr->slave_abort(ctlr);
2775
2776 return -ENOTSUPP;
2777}
2778EXPORT_SYMBOL_GPL(spi_slave_abort);
2779
2780int spi_target_abort(struct spi_device *spi)
2781{
2782 struct spi_controller *ctlr = spi->controller;
2783
2784 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2785 return ctlr->target_abort(ctlr);
2786
2787 return -ENOTSUPP;
2788}
2789EXPORT_SYMBOL_GPL(spi_target_abort);
2790
2791static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2792 char *buf)
2793{
2794 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2795 dev);
2796 struct device *child;
2797
2798 child = device_find_any_child(&ctlr->dev);
2799 return sprintf(buf, "%s\n",
2800 child ? to_spi_device(child)->modalias : NULL);
2801}
2802
2803static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2804 const char *buf, size_t count)
2805{
2806 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2807 dev);
2808 struct spi_device *spi;
2809 struct device *child;
2810 char name[32];
2811 int rc;
2812
2813 rc = sscanf(buf, "%31s", name);
2814 if (rc != 1 || !name[0])
2815 return -EINVAL;
2816
2817 child = device_find_any_child(&ctlr->dev);
2818 if (child) {
2819 /* Remove registered slave */
2820 device_unregister(child);
2821 put_device(child);
2822 }
2823
2824 if (strcmp(name, "(null)")) {
2825 /* Register new slave */
2826 spi = spi_alloc_device(ctlr);
2827 if (!spi)
2828 return -ENOMEM;
2829
2830 strscpy(spi->modalias, name, sizeof(spi->modalias));
2831
2832 rc = spi_add_device(spi);
2833 if (rc) {
2834 spi_dev_put(spi);
2835 return rc;
2836 }
2837 }
2838
2839 return count;
2840}
2841
2842static DEVICE_ATTR_RW(slave);
2843
2844static struct attribute *spi_slave_attrs[] = {
2845 &dev_attr_slave.attr,
2846 NULL,
2847};
2848
2849static const struct attribute_group spi_slave_group = {
2850 .attrs = spi_slave_attrs,
2851};
2852
2853static const struct attribute_group *spi_slave_groups[] = {
2854 &spi_controller_statistics_group,
2855 &spi_slave_group,
2856 NULL,
2857};
2858
2859static struct class spi_slave_class = {
2860 .name = "spi_slave",
2861 .owner = THIS_MODULE,
2862 .dev_release = spi_controller_release,
2863 .dev_groups = spi_slave_groups,
2864};
2865#else
2866extern struct class spi_slave_class; /* dummy */
2867#endif
2868
2869/**
2870 * __spi_alloc_controller - allocate an SPI master or slave controller
2871 * @dev: the controller, possibly using the platform_bus
2872 * @size: how much zeroed driver-private data to allocate; the pointer to this
2873 * memory is in the driver_data field of the returned device, accessible
2874 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2875 * drivers granting DMA access to portions of their private data need to
2876 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2877 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2878 * slave (true) controller
2879 * Context: can sleep
2880 *
2881 * This call is used only by SPI controller drivers, which are the
2882 * only ones directly touching chip registers. It's how they allocate
2883 * an spi_controller structure, prior to calling spi_register_controller().
2884 *
2885 * This must be called from context that can sleep.
2886 *
2887 * The caller is responsible for assigning the bus number and initializing the
2888 * controller's methods before calling spi_register_controller(); and (after
2889 * errors adding the device) calling spi_controller_put() to prevent a memory
2890 * leak.
2891 *
2892 * Return: the SPI controller structure on success, else NULL.
2893 */
2894struct spi_controller *__spi_alloc_controller(struct device *dev,
2895 unsigned int size, bool slave)
2896{
2897 struct spi_controller *ctlr;
2898 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2899
2900 if (!dev)
2901 return NULL;
2902
2903 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2904 if (!ctlr)
2905 return NULL;
2906
2907 device_initialize(&ctlr->dev);
2908 INIT_LIST_HEAD(&ctlr->queue);
2909 spin_lock_init(&ctlr->queue_lock);
2910 spin_lock_init(&ctlr->bus_lock_spinlock);
2911 mutex_init(&ctlr->bus_lock_mutex);
2912 mutex_init(&ctlr->io_mutex);
2913 mutex_init(&ctlr->add_lock);
2914 ctlr->bus_num = -1;
2915 ctlr->num_chipselect = 1;
2916 ctlr->slave = slave;
2917 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2918 ctlr->dev.class = &spi_slave_class;
2919 else
2920 ctlr->dev.class = &spi_master_class;
2921 ctlr->dev.parent = dev;
2922 pm_suspend_ignore_children(&ctlr->dev, true);
2923 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2924
2925 return ctlr;
2926}
2927EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2928
2929static void devm_spi_release_controller(struct device *dev, void *ctlr)
2930{
2931 spi_controller_put(*(struct spi_controller **)ctlr);
2932}
2933
2934/**
2935 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2936 * @dev: physical device of SPI controller
2937 * @size: how much zeroed driver-private data to allocate
2938 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2939 * Context: can sleep
2940 *
2941 * Allocate an SPI controller and automatically release a reference on it
2942 * when @dev is unbound from its driver. Drivers are thus relieved from
2943 * having to call spi_controller_put().
2944 *
2945 * The arguments to this function are identical to __spi_alloc_controller().
2946 *
2947 * Return: the SPI controller structure on success, else NULL.
2948 */
2949struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2950 unsigned int size,
2951 bool slave)
2952{
2953 struct spi_controller **ptr, *ctlr;
2954
2955 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2956 GFP_KERNEL);
2957 if (!ptr)
2958 return NULL;
2959
2960 ctlr = __spi_alloc_controller(dev, size, slave);
2961 if (ctlr) {
2962 ctlr->devm_allocated = true;
2963 *ptr = ctlr;
2964 devres_add(dev, ptr);
2965 } else {
2966 devres_free(ptr);
2967 }
2968
2969 return ctlr;
2970}
2971EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2972
2973/**
2974 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2975 * @ctlr: The SPI master to grab GPIO descriptors for
2976 */
2977static int spi_get_gpio_descs(struct spi_controller *ctlr)
2978{
2979 int nb, i;
2980 struct gpio_desc **cs;
2981 struct device *dev = &ctlr->dev;
2982 unsigned long native_cs_mask = 0;
2983 unsigned int num_cs_gpios = 0;
2984
2985 nb = gpiod_count(dev, "cs");
2986 if (nb < 0) {
2987 /* No GPIOs at all is fine, else return the error */
2988 if (nb == -ENOENT)
2989 return 0;
2990 return nb;
2991 }
2992
2993 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2994
2995 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2996 GFP_KERNEL);
2997 if (!cs)
2998 return -ENOMEM;
2999 ctlr->cs_gpiods = cs;
3000
3001 for (i = 0; i < nb; i++) {
3002 /*
3003 * Most chipselects are active low, the inverted
3004 * semantics are handled by special quirks in gpiolib,
3005 * so initializing them GPIOD_OUT_LOW here means
3006 * "unasserted", in most cases this will drive the physical
3007 * line high.
3008 */
3009 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3010 GPIOD_OUT_LOW);
3011 if (IS_ERR(cs[i]))
3012 return PTR_ERR(cs[i]);
3013
3014 if (cs[i]) {
3015 /*
3016 * If we find a CS GPIO, name it after the device and
3017 * chip select line.
3018 */
3019 char *gpioname;
3020
3021 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3022 dev_name(dev), i);
3023 if (!gpioname)
3024 return -ENOMEM;
3025 gpiod_set_consumer_name(cs[i], gpioname);
3026 num_cs_gpios++;
3027 continue;
3028 }
3029
3030 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3031 dev_err(dev, "Invalid native chip select %d\n", i);
3032 return -EINVAL;
3033 }
3034 native_cs_mask |= BIT(i);
3035 }
3036
3037 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3038
3039 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
3040 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3041 dev_err(dev, "No unused native chip select available\n");
3042 return -EINVAL;
3043 }
3044
3045 return 0;
3046}
3047
3048static int spi_controller_check_ops(struct spi_controller *ctlr)
3049{
3050 /*
3051 * The controller may implement only the high-level SPI-memory like
3052 * operations if it does not support regular SPI transfers, and this is
3053 * valid use case.
3054 * If ->mem_ops is NULL, we request that at least one of the
3055 * ->transfer_xxx() method be implemented.
3056 */
3057 if (ctlr->mem_ops) {
3058 if (!ctlr->mem_ops->exec_op)
3059 return -EINVAL;
3060 } else if (!ctlr->transfer && !ctlr->transfer_one &&
3061 !ctlr->transfer_one_message) {
3062 return -EINVAL;
3063 }
3064
3065 return 0;
3066}
3067
3068/**
3069 * spi_register_controller - register SPI master or slave controller
3070 * @ctlr: initialized master, originally from spi_alloc_master() or
3071 * spi_alloc_slave()
3072 * Context: can sleep
3073 *
3074 * SPI controllers connect to their drivers using some non-SPI bus,
3075 * such as the platform bus. The final stage of probe() in that code
3076 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3077 *
3078 * SPI controllers use board specific (often SOC specific) bus numbers,
3079 * and board-specific addressing for SPI devices combines those numbers
3080 * with chip select numbers. Since SPI does not directly support dynamic
3081 * device identification, boards need configuration tables telling which
3082 * chip is at which address.
3083 *
3084 * This must be called from context that can sleep. It returns zero on
3085 * success, else a negative error code (dropping the controller's refcount).
3086 * After a successful return, the caller is responsible for calling
3087 * spi_unregister_controller().
3088 *
3089 * Return: zero on success, else a negative error code.
3090 */
3091int spi_register_controller(struct spi_controller *ctlr)
3092{
3093 struct device *dev = ctlr->dev.parent;
3094 struct boardinfo *bi;
3095 int status;
3096 int id, first_dynamic;
3097
3098 if (!dev)
3099 return -ENODEV;
3100
3101 /*
3102 * Make sure all necessary hooks are implemented before registering
3103 * the SPI controller.
3104 */
3105 status = spi_controller_check_ops(ctlr);
3106 if (status)
3107 return status;
3108
3109 if (ctlr->bus_num >= 0) {
3110 /* Devices with a fixed bus num must check-in with the num */
3111 mutex_lock(&board_lock);
3112 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3113 ctlr->bus_num + 1, GFP_KERNEL);
3114 mutex_unlock(&board_lock);
3115 if (WARN(id < 0, "couldn't get idr"))
3116 return id == -ENOSPC ? -EBUSY : id;
3117 ctlr->bus_num = id;
3118 } else if (ctlr->dev.of_node) {
3119 /* Allocate dynamic bus number using Linux idr */
3120 id = of_alias_get_id(ctlr->dev.of_node, "spi");
3121 if (id >= 0) {
3122 ctlr->bus_num = id;
3123 mutex_lock(&board_lock);
3124 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3125 ctlr->bus_num + 1, GFP_KERNEL);
3126 mutex_unlock(&board_lock);
3127 if (WARN(id < 0, "couldn't get idr"))
3128 return id == -ENOSPC ? -EBUSY : id;
3129 }
3130 }
3131 if (ctlr->bus_num < 0) {
3132 first_dynamic = of_alias_get_highest_id("spi");
3133 if (first_dynamic < 0)
3134 first_dynamic = 0;
3135 else
3136 first_dynamic++;
3137
3138 mutex_lock(&board_lock);
3139 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3140 0, GFP_KERNEL);
3141 mutex_unlock(&board_lock);
3142 if (WARN(id < 0, "couldn't get idr"))
3143 return id;
3144 ctlr->bus_num = id;
3145 }
3146 ctlr->bus_lock_flag = 0;
3147 init_completion(&ctlr->xfer_completion);
3148 init_completion(&ctlr->cur_msg_completion);
3149 if (!ctlr->max_dma_len)
3150 ctlr->max_dma_len = INT_MAX;
3151
3152 /*
3153 * Register the device, then userspace will see it.
3154 * Registration fails if the bus ID is in use.
3155 */
3156 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3157
3158 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3159 status = spi_get_gpio_descs(ctlr);
3160 if (status)
3161 goto free_bus_id;
3162 /*
3163 * A controller using GPIO descriptors always
3164 * supports SPI_CS_HIGH if need be.
3165 */
3166 ctlr->mode_bits |= SPI_CS_HIGH;
3167 }
3168
3169 /*
3170 * Even if it's just one always-selected device, there must
3171 * be at least one chipselect.
3172 */
3173 if (!ctlr->num_chipselect) {
3174 status = -EINVAL;
3175 goto free_bus_id;
3176 }
3177
3178 /* Setting last_cs to -1 means no chip selected */
3179 ctlr->last_cs = -1;
3180
3181 status = device_add(&ctlr->dev);
3182 if (status < 0)
3183 goto free_bus_id;
3184 dev_dbg(dev, "registered %s %s\n",
3185 spi_controller_is_slave(ctlr) ? "slave" : "master",
3186 dev_name(&ctlr->dev));
3187
3188 /*
3189 * If we're using a queued driver, start the queue. Note that we don't
3190 * need the queueing logic if the driver is only supporting high-level
3191 * memory operations.
3192 */
3193 if (ctlr->transfer) {
3194 dev_info(dev, "controller is unqueued, this is deprecated\n");
3195 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3196 status = spi_controller_initialize_queue(ctlr);
3197 if (status) {
3198 device_del(&ctlr->dev);
3199 goto free_bus_id;
3200 }
3201 }
3202 /* Add statistics */
3203 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3204 if (!ctlr->pcpu_statistics) {
3205 dev_err(dev, "Error allocating per-cpu statistics\n");
3206 status = -ENOMEM;
3207 goto destroy_queue;
3208 }
3209
3210 mutex_lock(&board_lock);
3211 list_add_tail(&ctlr->list, &spi_controller_list);
3212 list_for_each_entry(bi, &board_list, list)
3213 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3214 mutex_unlock(&board_lock);
3215
3216 /* Register devices from the device tree and ACPI */
3217 of_register_spi_devices(ctlr);
3218 acpi_register_spi_devices(ctlr);
3219 return status;
3220
3221destroy_queue:
3222 spi_destroy_queue(ctlr);
3223free_bus_id:
3224 mutex_lock(&board_lock);
3225 idr_remove(&spi_master_idr, ctlr->bus_num);
3226 mutex_unlock(&board_lock);
3227 return status;
3228}
3229EXPORT_SYMBOL_GPL(spi_register_controller);
3230
3231static void devm_spi_unregister(struct device *dev, void *res)
3232{
3233 spi_unregister_controller(*(struct spi_controller **)res);
3234}
3235
3236/**
3237 * devm_spi_register_controller - register managed SPI master or slave
3238 * controller
3239 * @dev: device managing SPI controller
3240 * @ctlr: initialized controller, originally from spi_alloc_master() or
3241 * spi_alloc_slave()
3242 * Context: can sleep
3243 *
3244 * Register a SPI device as with spi_register_controller() which will
3245 * automatically be unregistered and freed.
3246 *
3247 * Return: zero on success, else a negative error code.
3248 */
3249int devm_spi_register_controller(struct device *dev,
3250 struct spi_controller *ctlr)
3251{
3252 struct spi_controller **ptr;
3253 int ret;
3254
3255 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3256 if (!ptr)
3257 return -ENOMEM;
3258
3259 ret = spi_register_controller(ctlr);
3260 if (!ret) {
3261 *ptr = ctlr;
3262 devres_add(dev, ptr);
3263 } else {
3264 devres_free(ptr);
3265 }
3266
3267 return ret;
3268}
3269EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3270
3271static int __unregister(struct device *dev, void *null)
3272{
3273 spi_unregister_device(to_spi_device(dev));
3274 return 0;
3275}
3276
3277/**
3278 * spi_unregister_controller - unregister SPI master or slave controller
3279 * @ctlr: the controller being unregistered
3280 * Context: can sleep
3281 *
3282 * This call is used only by SPI controller drivers, which are the
3283 * only ones directly touching chip registers.
3284 *
3285 * This must be called from context that can sleep.
3286 *
3287 * Note that this function also drops a reference to the controller.
3288 */
3289void spi_unregister_controller(struct spi_controller *ctlr)
3290{
3291 struct spi_controller *found;
3292 int id = ctlr->bus_num;
3293
3294 /* Prevent addition of new devices, unregister existing ones */
3295 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3296 mutex_lock(&ctlr->add_lock);
3297
3298 device_for_each_child(&ctlr->dev, NULL, __unregister);
3299
3300 /* First make sure that this controller was ever added */
3301 mutex_lock(&board_lock);
3302 found = idr_find(&spi_master_idr, id);
3303 mutex_unlock(&board_lock);
3304 if (ctlr->queued) {
3305 if (spi_destroy_queue(ctlr))
3306 dev_err(&ctlr->dev, "queue remove failed\n");
3307 }
3308 mutex_lock(&board_lock);
3309 list_del(&ctlr->list);
3310 mutex_unlock(&board_lock);
3311
3312 device_del(&ctlr->dev);
3313
3314 /* Free bus id */
3315 mutex_lock(&board_lock);
3316 if (found == ctlr)
3317 idr_remove(&spi_master_idr, id);
3318 mutex_unlock(&board_lock);
3319
3320 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3321 mutex_unlock(&ctlr->add_lock);
3322
3323 /* Release the last reference on the controller if its driver
3324 * has not yet been converted to devm_spi_alloc_master/slave().
3325 */
3326 if (!ctlr->devm_allocated)
3327 put_device(&ctlr->dev);
3328}
3329EXPORT_SYMBOL_GPL(spi_unregister_controller);
3330
3331int spi_controller_suspend(struct spi_controller *ctlr)
3332{
3333 int ret;
3334
3335 /* Basically no-ops for non-queued controllers */
3336 if (!ctlr->queued)
3337 return 0;
3338
3339 ret = spi_stop_queue(ctlr);
3340 if (ret)
3341 dev_err(&ctlr->dev, "queue stop failed\n");
3342
3343 return ret;
3344}
3345EXPORT_SYMBOL_GPL(spi_controller_suspend);
3346
3347int spi_controller_resume(struct spi_controller *ctlr)
3348{
3349 int ret;
3350
3351 if (!ctlr->queued)
3352 return 0;
3353
3354 ret = spi_start_queue(ctlr);
3355 if (ret)
3356 dev_err(&ctlr->dev, "queue restart failed\n");
3357
3358 return ret;
3359}
3360EXPORT_SYMBOL_GPL(spi_controller_resume);
3361
3362/*-------------------------------------------------------------------------*/
3363
3364/* Core methods for spi_message alterations */
3365
3366static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3367 struct spi_message *msg,
3368 void *res)
3369{
3370 struct spi_replaced_transfers *rxfer = res;
3371 size_t i;
3372
3373 /* Call extra callback if requested */
3374 if (rxfer->release)
3375 rxfer->release(ctlr, msg, res);
3376
3377 /* Insert replaced transfers back into the message */
3378 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3379
3380 /* Remove the formerly inserted entries */
3381 for (i = 0; i < rxfer->inserted; i++)
3382 list_del(&rxfer->inserted_transfers[i].transfer_list);
3383}
3384
3385/**
3386 * spi_replace_transfers - replace transfers with several transfers
3387 * and register change with spi_message.resources
3388 * @msg: the spi_message we work upon
3389 * @xfer_first: the first spi_transfer we want to replace
3390 * @remove: number of transfers to remove
3391 * @insert: the number of transfers we want to insert instead
3392 * @release: extra release code necessary in some circumstances
3393 * @extradatasize: extra data to allocate (with alignment guarantees
3394 * of struct @spi_transfer)
3395 * @gfp: gfp flags
3396 *
3397 * Returns: pointer to @spi_replaced_transfers,
3398 * PTR_ERR(...) in case of errors.
3399 */
3400static struct spi_replaced_transfers *spi_replace_transfers(
3401 struct spi_message *msg,
3402 struct spi_transfer *xfer_first,
3403 size_t remove,
3404 size_t insert,
3405 spi_replaced_release_t release,
3406 size_t extradatasize,
3407 gfp_t gfp)
3408{
3409 struct spi_replaced_transfers *rxfer;
3410 struct spi_transfer *xfer;
3411 size_t i;
3412
3413 /* Allocate the structure using spi_res */
3414 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3415 struct_size(rxfer, inserted_transfers, insert)
3416 + extradatasize,
3417 gfp);
3418 if (!rxfer)
3419 return ERR_PTR(-ENOMEM);
3420
3421 /* The release code to invoke before running the generic release */
3422 rxfer->release = release;
3423
3424 /* Assign extradata */
3425 if (extradatasize)
3426 rxfer->extradata =
3427 &rxfer->inserted_transfers[insert];
3428
3429 /* Init the replaced_transfers list */
3430 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3431
3432 /*
3433 * Assign the list_entry after which we should reinsert
3434 * the @replaced_transfers - it may be spi_message.messages!
3435 */
3436 rxfer->replaced_after = xfer_first->transfer_list.prev;
3437
3438 /* Remove the requested number of transfers */
3439 for (i = 0; i < remove; i++) {
3440 /*
3441 * If the entry after replaced_after it is msg->transfers
3442 * then we have been requested to remove more transfers
3443 * than are in the list.
3444 */
3445 if (rxfer->replaced_after->next == &msg->transfers) {
3446 dev_err(&msg->spi->dev,
3447 "requested to remove more spi_transfers than are available\n");
3448 /* Insert replaced transfers back into the message */
3449 list_splice(&rxfer->replaced_transfers,
3450 rxfer->replaced_after);
3451
3452 /* Free the spi_replace_transfer structure... */
3453 spi_res_free(rxfer);
3454
3455 /* ...and return with an error */
3456 return ERR_PTR(-EINVAL);
3457 }
3458
3459 /*
3460 * Remove the entry after replaced_after from list of
3461 * transfers and add it to list of replaced_transfers.
3462 */
3463 list_move_tail(rxfer->replaced_after->next,
3464 &rxfer->replaced_transfers);
3465 }
3466
3467 /*
3468 * Create copy of the given xfer with identical settings
3469 * based on the first transfer to get removed.
3470 */
3471 for (i = 0; i < insert; i++) {
3472 /* We need to run in reverse order */
3473 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3474
3475 /* Copy all spi_transfer data */
3476 memcpy(xfer, xfer_first, sizeof(*xfer));
3477
3478 /* Add to list */
3479 list_add(&xfer->transfer_list, rxfer->replaced_after);
3480
3481 /* Clear cs_change and delay for all but the last */
3482 if (i) {
3483 xfer->cs_change = false;
3484 xfer->delay.value = 0;
3485 }
3486 }
3487
3488 /* Set up inserted... */
3489 rxfer->inserted = insert;
3490
3491 /* ...and register it with spi_res/spi_message */
3492 spi_res_add(msg, rxfer);
3493
3494 return rxfer;
3495}
3496
3497static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3498 struct spi_message *msg,
3499 struct spi_transfer **xferp,
3500 size_t maxsize,
3501 gfp_t gfp)
3502{
3503 struct spi_transfer *xfer = *xferp, *xfers;
3504 struct spi_replaced_transfers *srt;
3505 size_t offset;
3506 size_t count, i;
3507
3508 /* Calculate how many we have to replace */
3509 count = DIV_ROUND_UP(xfer->len, maxsize);
3510
3511 /* Create replacement */
3512 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3513 if (IS_ERR(srt))
3514 return PTR_ERR(srt);
3515 xfers = srt->inserted_transfers;
3516
3517 /*
3518 * Now handle each of those newly inserted spi_transfers.
3519 * Note that the replacements spi_transfers all are preset
3520 * to the same values as *xferp, so tx_buf, rx_buf and len
3521 * are all identical (as well as most others)
3522 * so we just have to fix up len and the pointers.
3523 *
3524 * This also includes support for the depreciated
3525 * spi_message.is_dma_mapped interface.
3526 */
3527
3528 /*
3529 * The first transfer just needs the length modified, so we
3530 * run it outside the loop.
3531 */
3532 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3533
3534 /* All the others need rx_buf/tx_buf also set */
3535 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3536 /* Update rx_buf, tx_buf and dma */
3537 if (xfers[i].rx_buf)
3538 xfers[i].rx_buf += offset;
3539 if (xfers[i].rx_dma)
3540 xfers[i].rx_dma += offset;
3541 if (xfers[i].tx_buf)
3542 xfers[i].tx_buf += offset;
3543 if (xfers[i].tx_dma)
3544 xfers[i].tx_dma += offset;
3545
3546 /* Update length */
3547 xfers[i].len = min(maxsize, xfers[i].len - offset);
3548 }
3549
3550 /*
3551 * We set up xferp to the last entry we have inserted,
3552 * so that we skip those already split transfers.
3553 */
3554 *xferp = &xfers[count - 1];
3555
3556 /* Increment statistics counters */
3557 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3558 transfers_split_maxsize);
3559 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3560 transfers_split_maxsize);
3561
3562 return 0;
3563}
3564
3565/**
3566 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3567 * when an individual transfer exceeds a
3568 * certain size
3569 * @ctlr: the @spi_controller for this transfer
3570 * @msg: the @spi_message to transform
3571 * @maxsize: the maximum when to apply this
3572 * @gfp: GFP allocation flags
3573 *
3574 * Return: status of transformation
3575 */
3576int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3577 struct spi_message *msg,
3578 size_t maxsize,
3579 gfp_t gfp)
3580{
3581 struct spi_transfer *xfer;
3582 int ret;
3583
3584 /*
3585 * Iterate over the transfer_list,
3586 * but note that xfer is advanced to the last transfer inserted
3587 * to avoid checking sizes again unnecessarily (also xfer does
3588 * potentially belong to a different list by the time the
3589 * replacement has happened).
3590 */
3591 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3592 if (xfer->len > maxsize) {
3593 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3594 maxsize, gfp);
3595 if (ret)
3596 return ret;
3597 }
3598 }
3599
3600 return 0;
3601}
3602EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
3603
3604/*-------------------------------------------------------------------------*/
3605
3606/* Core methods for SPI controller protocol drivers. Some of the
3607 * other core methods are currently defined as inline functions.
3608 */
3609
3610static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3611 u8 bits_per_word)
3612{
3613 if (ctlr->bits_per_word_mask) {
3614 /* Only 32 bits fit in the mask */
3615 if (bits_per_word > 32)
3616 return -EINVAL;
3617 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3618 return -EINVAL;
3619 }
3620
3621 return 0;
3622}
3623
3624/**
3625 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3626 * @spi: the device that requires specific CS timing configuration
3627 *
3628 * Return: zero on success, else a negative error code.
3629 */
3630static int spi_set_cs_timing(struct spi_device *spi)
3631{
3632 struct device *parent = spi->controller->dev.parent;
3633 int status = 0;
3634
3635 if (spi->controller->set_cs_timing && !spi->cs_gpiod) {
3636 if (spi->controller->auto_runtime_pm) {
3637 status = pm_runtime_get_sync(parent);
3638 if (status < 0) {
3639 pm_runtime_put_noidle(parent);
3640 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3641 status);
3642 return status;
3643 }
3644
3645 status = spi->controller->set_cs_timing(spi);
3646 pm_runtime_mark_last_busy(parent);
3647 pm_runtime_put_autosuspend(parent);
3648 } else {
3649 status = spi->controller->set_cs_timing(spi);
3650 }
3651 }
3652 return status;
3653}
3654
3655/**
3656 * spi_setup - setup SPI mode and clock rate
3657 * @spi: the device whose settings are being modified
3658 * Context: can sleep, and no requests are queued to the device
3659 *
3660 * SPI protocol drivers may need to update the transfer mode if the
3661 * device doesn't work with its default. They may likewise need
3662 * to update clock rates or word sizes from initial values. This function
3663 * changes those settings, and must be called from a context that can sleep.
3664 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3665 * effect the next time the device is selected and data is transferred to
3666 * or from it. When this function returns, the spi device is deselected.
3667 *
3668 * Note that this call will fail if the protocol driver specifies an option
3669 * that the underlying controller or its driver does not support. For
3670 * example, not all hardware supports wire transfers using nine bit words,
3671 * LSB-first wire encoding, or active-high chipselects.
3672 *
3673 * Return: zero on success, else a negative error code.
3674 */
3675int spi_setup(struct spi_device *spi)
3676{
3677 unsigned bad_bits, ugly_bits;
3678 int status = 0;
3679
3680 /*
3681 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
3682 * are set at the same time.
3683 */
3684 if ((hweight_long(spi->mode &
3685 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3686 (hweight_long(spi->mode &
3687 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3688 dev_err(&spi->dev,
3689 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3690 return -EINVAL;
3691 }
3692 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3693 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3694 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3695 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3696 return -EINVAL;
3697 /*
3698 * Help drivers fail *cleanly* when they need options
3699 * that aren't supported with their current controller.
3700 * SPI_CS_WORD has a fallback software implementation,
3701 * so it is ignored here.
3702 */
3703 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3704 SPI_NO_TX | SPI_NO_RX);
3705 ugly_bits = bad_bits &
3706 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3707 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3708 if (ugly_bits) {
3709 dev_warn(&spi->dev,
3710 "setup: ignoring unsupported mode bits %x\n",
3711 ugly_bits);
3712 spi->mode &= ~ugly_bits;
3713 bad_bits &= ~ugly_bits;
3714 }
3715 if (bad_bits) {
3716 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3717 bad_bits);
3718 return -EINVAL;
3719 }
3720
3721 if (!spi->bits_per_word) {
3722 spi->bits_per_word = 8;
3723 } else {
3724 /*
3725 * Some controllers may not support the default 8 bits-per-word
3726 * so only perform the check when this is explicitly provided.
3727 */
3728 status = __spi_validate_bits_per_word(spi->controller,
3729 spi->bits_per_word);
3730 if (status)
3731 return status;
3732 }
3733
3734 if (spi->controller->max_speed_hz &&
3735 (!spi->max_speed_hz ||
3736 spi->max_speed_hz > spi->controller->max_speed_hz))
3737 spi->max_speed_hz = spi->controller->max_speed_hz;
3738
3739 mutex_lock(&spi->controller->io_mutex);
3740
3741 if (spi->controller->setup) {
3742 status = spi->controller->setup(spi);
3743 if (status) {
3744 mutex_unlock(&spi->controller->io_mutex);
3745 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3746 status);
3747 return status;
3748 }
3749 }
3750
3751 status = spi_set_cs_timing(spi);
3752 if (status) {
3753 mutex_unlock(&spi->controller->io_mutex);
3754 return status;
3755 }
3756
3757 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3758 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3759 if (status < 0) {
3760 mutex_unlock(&spi->controller->io_mutex);
3761 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3762 status);
3763 return status;
3764 }
3765
3766 /*
3767 * We do not want to return positive value from pm_runtime_get,
3768 * there are many instances of devices calling spi_setup() and
3769 * checking for a non-zero return value instead of a negative
3770 * return value.
3771 */
3772 status = 0;
3773
3774 spi_set_cs(spi, false, true);
3775 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3776 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3777 } else {
3778 spi_set_cs(spi, false, true);
3779 }
3780
3781 mutex_unlock(&spi->controller->io_mutex);
3782
3783 if (spi->rt && !spi->controller->rt) {
3784 spi->controller->rt = true;
3785 spi_set_thread_rt(spi->controller);
3786 }
3787
3788 trace_spi_setup(spi, status);
3789
3790 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3791 spi->mode & SPI_MODE_X_MASK,
3792 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3793 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3794 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3795 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3796 spi->bits_per_word, spi->max_speed_hz,
3797 status);
3798
3799 return status;
3800}
3801EXPORT_SYMBOL_GPL(spi_setup);
3802
3803static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3804 struct spi_device *spi)
3805{
3806 int delay1, delay2;
3807
3808 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3809 if (delay1 < 0)
3810 return delay1;
3811
3812 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3813 if (delay2 < 0)
3814 return delay2;
3815
3816 if (delay1 < delay2)
3817 memcpy(&xfer->word_delay, &spi->word_delay,
3818 sizeof(xfer->word_delay));
3819
3820 return 0;
3821}
3822
3823static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3824{
3825 struct spi_controller *ctlr = spi->controller;
3826 struct spi_transfer *xfer;
3827 int w_size;
3828
3829 if (list_empty(&message->transfers))
3830 return -EINVAL;
3831
3832 /*
3833 * If an SPI controller does not support toggling the CS line on each
3834 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3835 * for the CS line, we can emulate the CS-per-word hardware function by
3836 * splitting transfers into one-word transfers and ensuring that
3837 * cs_change is set for each transfer.
3838 */
3839 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3840 spi->cs_gpiod)) {
3841 size_t maxsize;
3842 int ret;
3843
3844 maxsize = (spi->bits_per_word + 7) / 8;
3845
3846 /* spi_split_transfers_maxsize() requires message->spi */
3847 message->spi = spi;
3848
3849 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3850 GFP_KERNEL);
3851 if (ret)
3852 return ret;
3853
3854 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3855 /* Don't change cs_change on the last entry in the list */
3856 if (list_is_last(&xfer->transfer_list, &message->transfers))
3857 break;
3858 xfer->cs_change = 1;
3859 }
3860 }
3861
3862 /*
3863 * Half-duplex links include original MicroWire, and ones with
3864 * only one data pin like SPI_3WIRE (switches direction) or where
3865 * either MOSI or MISO is missing. They can also be caused by
3866 * software limitations.
3867 */
3868 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3869 (spi->mode & SPI_3WIRE)) {
3870 unsigned flags = ctlr->flags;
3871
3872 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3873 if (xfer->rx_buf && xfer->tx_buf)
3874 return -EINVAL;
3875 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3876 return -EINVAL;
3877 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3878 return -EINVAL;
3879 }
3880 }
3881
3882 /*
3883 * Set transfer bits_per_word and max speed as spi device default if
3884 * it is not set for this transfer.
3885 * Set transfer tx_nbits and rx_nbits as single transfer default
3886 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3887 * Ensure transfer word_delay is at least as long as that required by
3888 * device itself.
3889 */
3890 message->frame_length = 0;
3891 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3892 xfer->effective_speed_hz = 0;
3893 message->frame_length += xfer->len;
3894 if (!xfer->bits_per_word)
3895 xfer->bits_per_word = spi->bits_per_word;
3896
3897 if (!xfer->speed_hz)
3898 xfer->speed_hz = spi->max_speed_hz;
3899
3900 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3901 xfer->speed_hz = ctlr->max_speed_hz;
3902
3903 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3904 return -EINVAL;
3905
3906 /*
3907 * SPI transfer length should be multiple of SPI word size
3908 * where SPI word size should be power-of-two multiple.
3909 */
3910 if (xfer->bits_per_word <= 8)
3911 w_size = 1;
3912 else if (xfer->bits_per_word <= 16)
3913 w_size = 2;
3914 else
3915 w_size = 4;
3916
3917 /* No partial transfers accepted */
3918 if (xfer->len % w_size)
3919 return -EINVAL;
3920
3921 if (xfer->speed_hz && ctlr->min_speed_hz &&
3922 xfer->speed_hz < ctlr->min_speed_hz)
3923 return -EINVAL;
3924
3925 if (xfer->tx_buf && !xfer->tx_nbits)
3926 xfer->tx_nbits = SPI_NBITS_SINGLE;
3927 if (xfer->rx_buf && !xfer->rx_nbits)
3928 xfer->rx_nbits = SPI_NBITS_SINGLE;
3929 /*
3930 * Check transfer tx/rx_nbits:
3931 * 1. check the value matches one of single, dual and quad
3932 * 2. check tx/rx_nbits match the mode in spi_device
3933 */
3934 if (xfer->tx_buf) {
3935 if (spi->mode & SPI_NO_TX)
3936 return -EINVAL;
3937 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3938 xfer->tx_nbits != SPI_NBITS_DUAL &&
3939 xfer->tx_nbits != SPI_NBITS_QUAD)
3940 return -EINVAL;
3941 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3942 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3943 return -EINVAL;
3944 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3945 !(spi->mode & SPI_TX_QUAD))
3946 return -EINVAL;
3947 }
3948 /* Check transfer rx_nbits */
3949 if (xfer->rx_buf) {
3950 if (spi->mode & SPI_NO_RX)
3951 return -EINVAL;
3952 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3953 xfer->rx_nbits != SPI_NBITS_DUAL &&
3954 xfer->rx_nbits != SPI_NBITS_QUAD)
3955 return -EINVAL;
3956 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3957 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3958 return -EINVAL;
3959 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3960 !(spi->mode & SPI_RX_QUAD))
3961 return -EINVAL;
3962 }
3963
3964 if (_spi_xfer_word_delay_update(xfer, spi))
3965 return -EINVAL;
3966 }
3967
3968 message->status = -EINPROGRESS;
3969
3970 return 0;
3971}
3972
3973static int __spi_async(struct spi_device *spi, struct spi_message *message)
3974{
3975 struct spi_controller *ctlr = spi->controller;
3976 struct spi_transfer *xfer;
3977
3978 /*
3979 * Some controllers do not support doing regular SPI transfers. Return
3980 * ENOTSUPP when this is the case.
3981 */
3982 if (!ctlr->transfer)
3983 return -ENOTSUPP;
3984
3985 message->spi = spi;
3986
3987 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
3988 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
3989
3990 trace_spi_message_submit(message);
3991
3992 if (!ctlr->ptp_sts_supported) {
3993 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3994 xfer->ptp_sts_word_pre = 0;
3995 ptp_read_system_prets(xfer->ptp_sts);
3996 }
3997 }
3998
3999 return ctlr->transfer(spi, message);
4000}
4001
4002/**
4003 * spi_async - asynchronous SPI transfer
4004 * @spi: device with which data will be exchanged
4005 * @message: describes the data transfers, including completion callback
4006 * Context: any (irqs may be blocked, etc)
4007 *
4008 * This call may be used in_irq and other contexts which can't sleep,
4009 * as well as from task contexts which can sleep.
4010 *
4011 * The completion callback is invoked in a context which can't sleep.
4012 * Before that invocation, the value of message->status is undefined.
4013 * When the callback is issued, message->status holds either zero (to
4014 * indicate complete success) or a negative error code. After that
4015 * callback returns, the driver which issued the transfer request may
4016 * deallocate the associated memory; it's no longer in use by any SPI
4017 * core or controller driver code.
4018 *
4019 * Note that although all messages to a spi_device are handled in
4020 * FIFO order, messages may go to different devices in other orders.
4021 * Some device might be higher priority, or have various "hard" access
4022 * time requirements, for example.
4023 *
4024 * On detection of any fault during the transfer, processing of
4025 * the entire message is aborted, and the device is deselected.
4026 * Until returning from the associated message completion callback,
4027 * no other spi_message queued to that device will be processed.
4028 * (This rule applies equally to all the synchronous transfer calls,
4029 * which are wrappers around this core asynchronous primitive.)
4030 *
4031 * Return: zero on success, else a negative error code.
4032 */
4033int spi_async(struct spi_device *spi, struct spi_message *message)
4034{
4035 struct spi_controller *ctlr = spi->controller;
4036 int ret;
4037 unsigned long flags;
4038
4039 ret = __spi_validate(spi, message);
4040 if (ret != 0)
4041 return ret;
4042
4043 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4044
4045 if (ctlr->bus_lock_flag)
4046 ret = -EBUSY;
4047 else
4048 ret = __spi_async(spi, message);
4049
4050 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4051
4052 return ret;
4053}
4054EXPORT_SYMBOL_GPL(spi_async);
4055
4056/**
4057 * spi_async_locked - version of spi_async with exclusive bus usage
4058 * @spi: device with which data will be exchanged
4059 * @message: describes the data transfers, including completion callback
4060 * Context: any (irqs may be blocked, etc)
4061 *
4062 * This call may be used in_irq and other contexts which can't sleep,
4063 * as well as from task contexts which can sleep.
4064 *
4065 * The completion callback is invoked in a context which can't sleep.
4066 * Before that invocation, the value of message->status is undefined.
4067 * When the callback is issued, message->status holds either zero (to
4068 * indicate complete success) or a negative error code. After that
4069 * callback returns, the driver which issued the transfer request may
4070 * deallocate the associated memory; it's no longer in use by any SPI
4071 * core or controller driver code.
4072 *
4073 * Note that although all messages to a spi_device are handled in
4074 * FIFO order, messages may go to different devices in other orders.
4075 * Some device might be higher priority, or have various "hard" access
4076 * time requirements, for example.
4077 *
4078 * On detection of any fault during the transfer, processing of
4079 * the entire message is aborted, and the device is deselected.
4080 * Until returning from the associated message completion callback,
4081 * no other spi_message queued to that device will be processed.
4082 * (This rule applies equally to all the synchronous transfer calls,
4083 * which are wrappers around this core asynchronous primitive.)
4084 *
4085 * Return: zero on success, else a negative error code.
4086 */
4087static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4088{
4089 struct spi_controller *ctlr = spi->controller;
4090 int ret;
4091 unsigned long flags;
4092
4093 ret = __spi_validate(spi, message);
4094 if (ret != 0)
4095 return ret;
4096
4097 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4098
4099 ret = __spi_async(spi, message);
4100
4101 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4102
4103 return ret;
4104
4105}
4106
4107static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4108{
4109 bool was_busy;
4110 int ret;
4111
4112 mutex_lock(&ctlr->io_mutex);
4113
4114 was_busy = ctlr->busy;
4115
4116 ctlr->cur_msg = msg;
4117 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4118 if (ret)
4119 goto out;
4120
4121 ctlr->cur_msg = NULL;
4122 ctlr->fallback = false;
4123
4124 if (!was_busy) {
4125 kfree(ctlr->dummy_rx);
4126 ctlr->dummy_rx = NULL;
4127 kfree(ctlr->dummy_tx);
4128 ctlr->dummy_tx = NULL;
4129 if (ctlr->unprepare_transfer_hardware &&
4130 ctlr->unprepare_transfer_hardware(ctlr))
4131 dev_err(&ctlr->dev,
4132 "failed to unprepare transfer hardware\n");
4133 spi_idle_runtime_pm(ctlr);
4134 }
4135
4136out:
4137 mutex_unlock(&ctlr->io_mutex);
4138}
4139
4140/*-------------------------------------------------------------------------*/
4141
4142/*
4143 * Utility methods for SPI protocol drivers, layered on
4144 * top of the core. Some other utility methods are defined as
4145 * inline functions.
4146 */
4147
4148static void spi_complete(void *arg)
4149{
4150 complete(arg);
4151}
4152
4153static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4154{
4155 DECLARE_COMPLETION_ONSTACK(done);
4156 int status;
4157 struct spi_controller *ctlr = spi->controller;
4158
4159 status = __spi_validate(spi, message);
4160 if (status != 0)
4161 return status;
4162
4163 message->spi = spi;
4164
4165 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4166 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4167
4168 /*
4169 * Checking queue_empty here only guarantees async/sync message
4170 * ordering when coming from the same context. It does not need to
4171 * guard against reentrancy from a different context. The io_mutex
4172 * will catch those cases.
4173 */
4174 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4175 message->actual_length = 0;
4176 message->status = -EINPROGRESS;
4177
4178 trace_spi_message_submit(message);
4179
4180 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4181 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4182
4183 __spi_transfer_message_noqueue(ctlr, message);
4184
4185 return message->status;
4186 }
4187
4188 /*
4189 * There are messages in the async queue that could have originated
4190 * from the same context, so we need to preserve ordering.
4191 * Therefor we send the message to the async queue and wait until they
4192 * are completed.
4193 */
4194 message->complete = spi_complete;
4195 message->context = &done;
4196 status = spi_async_locked(spi, message);
4197 if (status == 0) {
4198 wait_for_completion(&done);
4199 status = message->status;
4200 }
4201 message->context = NULL;
4202
4203 return status;
4204}
4205
4206/**
4207 * spi_sync - blocking/synchronous SPI data transfers
4208 * @spi: device with which data will be exchanged
4209 * @message: describes the data transfers
4210 * Context: can sleep
4211 *
4212 * This call may only be used from a context that may sleep. The sleep
4213 * is non-interruptible, and has no timeout. Low-overhead controller
4214 * drivers may DMA directly into and out of the message buffers.
4215 *
4216 * Note that the SPI device's chip select is active during the message,
4217 * and then is normally disabled between messages. Drivers for some
4218 * frequently-used devices may want to minimize costs of selecting a chip,
4219 * by leaving it selected in anticipation that the next message will go
4220 * to the same chip. (That may increase power usage.)
4221 *
4222 * Also, the caller is guaranteeing that the memory associated with the
4223 * message will not be freed before this call returns.
4224 *
4225 * Return: zero on success, else a negative error code.
4226 */
4227int spi_sync(struct spi_device *spi, struct spi_message *message)
4228{
4229 int ret;
4230
4231 mutex_lock(&spi->controller->bus_lock_mutex);
4232 ret = __spi_sync(spi, message);
4233 mutex_unlock(&spi->controller->bus_lock_mutex);
4234
4235 return ret;
4236}
4237EXPORT_SYMBOL_GPL(spi_sync);
4238
4239/**
4240 * spi_sync_locked - version of spi_sync with exclusive bus usage
4241 * @spi: device with which data will be exchanged
4242 * @message: describes the data transfers
4243 * Context: can sleep
4244 *
4245 * This call may only be used from a context that may sleep. The sleep
4246 * is non-interruptible, and has no timeout. Low-overhead controller
4247 * drivers may DMA directly into and out of the message buffers.
4248 *
4249 * This call should be used by drivers that require exclusive access to the
4250 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4251 * be released by a spi_bus_unlock call when the exclusive access is over.
4252 *
4253 * Return: zero on success, else a negative error code.
4254 */
4255int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4256{
4257 return __spi_sync(spi, message);
4258}
4259EXPORT_SYMBOL_GPL(spi_sync_locked);
4260
4261/**
4262 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4263 * @ctlr: SPI bus master that should be locked for exclusive bus access
4264 * Context: can sleep
4265 *
4266 * This call may only be used from a context that may sleep. The sleep
4267 * is non-interruptible, and has no timeout.
4268 *
4269 * This call should be used by drivers that require exclusive access to the
4270 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4271 * exclusive access is over. Data transfer must be done by spi_sync_locked
4272 * and spi_async_locked calls when the SPI bus lock is held.
4273 *
4274 * Return: always zero.
4275 */
4276int spi_bus_lock(struct spi_controller *ctlr)
4277{
4278 unsigned long flags;
4279
4280 mutex_lock(&ctlr->bus_lock_mutex);
4281
4282 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4283 ctlr->bus_lock_flag = 1;
4284 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4285
4286 /* Mutex remains locked until spi_bus_unlock() is called */
4287
4288 return 0;
4289}
4290EXPORT_SYMBOL_GPL(spi_bus_lock);
4291
4292/**
4293 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4294 * @ctlr: SPI bus master that was locked for exclusive bus access
4295 * Context: can sleep
4296 *
4297 * This call may only be used from a context that may sleep. The sleep
4298 * is non-interruptible, and has no timeout.
4299 *
4300 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4301 * call.
4302 *
4303 * Return: always zero.
4304 */
4305int spi_bus_unlock(struct spi_controller *ctlr)
4306{
4307 ctlr->bus_lock_flag = 0;
4308
4309 mutex_unlock(&ctlr->bus_lock_mutex);
4310
4311 return 0;
4312}
4313EXPORT_SYMBOL_GPL(spi_bus_unlock);
4314
4315/* Portable code must never pass more than 32 bytes */
4316#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4317
4318static u8 *buf;
4319
4320/**
4321 * spi_write_then_read - SPI synchronous write followed by read
4322 * @spi: device with which data will be exchanged
4323 * @txbuf: data to be written (need not be dma-safe)
4324 * @n_tx: size of txbuf, in bytes
4325 * @rxbuf: buffer into which data will be read (need not be dma-safe)
4326 * @n_rx: size of rxbuf, in bytes
4327 * Context: can sleep
4328 *
4329 * This performs a half duplex MicroWire style transaction with the
4330 * device, sending txbuf and then reading rxbuf. The return value
4331 * is zero for success, else a negative errno status code.
4332 * This call may only be used from a context that may sleep.
4333 *
4334 * Parameters to this routine are always copied using a small buffer.
4335 * Performance-sensitive or bulk transfer code should instead use
4336 * spi_{async,sync}() calls with dma-safe buffers.
4337 *
4338 * Return: zero on success, else a negative error code.
4339 */
4340int spi_write_then_read(struct spi_device *spi,
4341 const void *txbuf, unsigned n_tx,
4342 void *rxbuf, unsigned n_rx)
4343{
4344 static DEFINE_MUTEX(lock);
4345
4346 int status;
4347 struct spi_message message;
4348 struct spi_transfer x[2];
4349 u8 *local_buf;
4350
4351 /*
4352 * Use preallocated DMA-safe buffer if we can. We can't avoid
4353 * copying here, (as a pure convenience thing), but we can
4354 * keep heap costs out of the hot path unless someone else is
4355 * using the pre-allocated buffer or the transfer is too large.
4356 */
4357 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4358 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4359 GFP_KERNEL | GFP_DMA);
4360 if (!local_buf)
4361 return -ENOMEM;
4362 } else {
4363 local_buf = buf;
4364 }
4365
4366 spi_message_init(&message);
4367 memset(x, 0, sizeof(x));
4368 if (n_tx) {
4369 x[0].len = n_tx;
4370 spi_message_add_tail(&x[0], &message);
4371 }
4372 if (n_rx) {
4373 x[1].len = n_rx;
4374 spi_message_add_tail(&x[1], &message);
4375 }
4376
4377 memcpy(local_buf, txbuf, n_tx);
4378 x[0].tx_buf = local_buf;
4379 x[1].rx_buf = local_buf + n_tx;
4380
4381 /* Do the i/o */
4382 status = spi_sync(spi, &message);
4383 if (status == 0)
4384 memcpy(rxbuf, x[1].rx_buf, n_rx);
4385
4386 if (x[0].tx_buf == buf)
4387 mutex_unlock(&lock);
4388 else
4389 kfree(local_buf);
4390
4391 return status;
4392}
4393EXPORT_SYMBOL_GPL(spi_write_then_read);
4394
4395/*-------------------------------------------------------------------------*/
4396
4397#if IS_ENABLED(CONFIG_OF_DYNAMIC)
4398/* Must call put_device() when done with returned spi_device device */
4399static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4400{
4401 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4402
4403 return dev ? to_spi_device(dev) : NULL;
4404}
4405
4406/* The spi controllers are not using spi_bus, so we find it with another way */
4407static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4408{
4409 struct device *dev;
4410
4411 dev = class_find_device_by_of_node(&spi_master_class, node);
4412 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4413 dev = class_find_device_by_of_node(&spi_slave_class, node);
4414 if (!dev)
4415 return NULL;
4416
4417 /* Reference got in class_find_device */
4418 return container_of(dev, struct spi_controller, dev);
4419}
4420
4421static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4422 void *arg)
4423{
4424 struct of_reconfig_data *rd = arg;
4425 struct spi_controller *ctlr;
4426 struct spi_device *spi;
4427
4428 switch (of_reconfig_get_state_change(action, arg)) {
4429 case OF_RECONFIG_CHANGE_ADD:
4430 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4431 if (ctlr == NULL)
4432 return NOTIFY_OK; /* Not for us */
4433
4434 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4435 put_device(&ctlr->dev);
4436 return NOTIFY_OK;
4437 }
4438
4439 spi = of_register_spi_device(ctlr, rd->dn);
4440 put_device(&ctlr->dev);
4441
4442 if (IS_ERR(spi)) {
4443 pr_err("%s: failed to create for '%pOF'\n",
4444 __func__, rd->dn);
4445 of_node_clear_flag(rd->dn, OF_POPULATED);
4446 return notifier_from_errno(PTR_ERR(spi));
4447 }
4448 break;
4449
4450 case OF_RECONFIG_CHANGE_REMOVE:
4451 /* Already depopulated? */
4452 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4453 return NOTIFY_OK;
4454
4455 /* Find our device by node */
4456 spi = of_find_spi_device_by_node(rd->dn);
4457 if (spi == NULL)
4458 return NOTIFY_OK; /* No? not meant for us */
4459
4460 /* Unregister takes one ref away */
4461 spi_unregister_device(spi);
4462
4463 /* And put the reference of the find */
4464 put_device(&spi->dev);
4465 break;
4466 }
4467
4468 return NOTIFY_OK;
4469}
4470
4471static struct notifier_block spi_of_notifier = {
4472 .notifier_call = of_spi_notify,
4473};
4474#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4475extern struct notifier_block spi_of_notifier;
4476#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4477
4478#if IS_ENABLED(CONFIG_ACPI)
4479static int spi_acpi_controller_match(struct device *dev, const void *data)
4480{
4481 return ACPI_COMPANION(dev->parent) == data;
4482}
4483
4484static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4485{
4486 struct device *dev;
4487
4488 dev = class_find_device(&spi_master_class, NULL, adev,
4489 spi_acpi_controller_match);
4490 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4491 dev = class_find_device(&spi_slave_class, NULL, adev,
4492 spi_acpi_controller_match);
4493 if (!dev)
4494 return NULL;
4495
4496 return container_of(dev, struct spi_controller, dev);
4497}
4498
4499static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4500{
4501 struct device *dev;
4502
4503 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4504 return to_spi_device(dev);
4505}
4506
4507static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4508 void *arg)
4509{
4510 struct acpi_device *adev = arg;
4511 struct spi_controller *ctlr;
4512 struct spi_device *spi;
4513
4514 switch (value) {
4515 case ACPI_RECONFIG_DEVICE_ADD:
4516 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4517 if (!ctlr)
4518 break;
4519
4520 acpi_register_spi_device(ctlr, adev);
4521 put_device(&ctlr->dev);
4522 break;
4523 case ACPI_RECONFIG_DEVICE_REMOVE:
4524 if (!acpi_device_enumerated(adev))
4525 break;
4526
4527 spi = acpi_spi_find_device_by_adev(adev);
4528 if (!spi)
4529 break;
4530
4531 spi_unregister_device(spi);
4532 put_device(&spi->dev);
4533 break;
4534 }
4535
4536 return NOTIFY_OK;
4537}
4538
4539static struct notifier_block spi_acpi_notifier = {
4540 .notifier_call = acpi_spi_notify,
4541};
4542#else
4543extern struct notifier_block spi_acpi_notifier;
4544#endif
4545
4546static int __init spi_init(void)
4547{
4548 int status;
4549
4550 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4551 if (!buf) {
4552 status = -ENOMEM;
4553 goto err0;
4554 }
4555
4556 status = bus_register(&spi_bus_type);
4557 if (status < 0)
4558 goto err1;
4559
4560 status = class_register(&spi_master_class);
4561 if (status < 0)
4562 goto err2;
4563
4564 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4565 status = class_register(&spi_slave_class);
4566 if (status < 0)
4567 goto err3;
4568 }
4569
4570 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4571 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4572 if (IS_ENABLED(CONFIG_ACPI))
4573 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4574
4575 return 0;
4576
4577err3:
4578 class_unregister(&spi_master_class);
4579err2:
4580 bus_unregister(&spi_bus_type);
4581err1:
4582 kfree(buf);
4583 buf = NULL;
4584err0:
4585 return status;
4586}
4587
4588/*
4589 * A board_info is normally registered in arch_initcall(),
4590 * but even essential drivers wait till later.
4591 *
4592 * REVISIT only boardinfo really needs static linking. The rest (device and
4593 * driver registration) _could_ be dynamically linked (modular) ... Costs
4594 * include needing to have boardinfo data structures be much more public.
4595 */
4596postcore_initcall(spi_init);