/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
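
/*
 * Worked example (illustrative, not part of the API): the histogram
 * bucket index is fls()-based, so a transfer of len bytes lands in
 * bucket fls(len) - 1, i.e. floor(log2(len)), clamped to
 * SPI_STATISTICS_HISTO_SIZE - 1:
 *
 *	len = 1       -> fls(1) - 1 = 0      ("0-1" bucket)
 *	len = 6       -> fls(6) - 1 = 2      ("4-7" bucket)
 *	len = 4096    -> fls(4096) - 1 = 12  ("4096-8191" bucket)
 *	len = 1 MiB   -> clamped to 16       ("65536+" bucket)
 */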

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
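
/*
 * Usage sketch (hypothetical client driver, outside this file): a
 * driver fills in a struct spi_driver and registers it, typically via
 * the module_spi_driver() helper, which reaches this function through
 * spi_register_driver().  All "foo" names below are invented for
 * illustration.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-chip", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver		= { .name = "foo" },
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */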

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process.
 * It is also used to protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
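
/*
 * Usage sketch (illustrative, outside this file): the two-step
 * alloc/add flow lets the caller fill in parameters before
 * registration.  The modalias "foo-chip" is invented for illustration.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without adding
 *		return -ENODEV;
 *	}
 */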

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
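
/*
 * Usage sketch (illustrative, outside this file): an adapter driver
 * that learns about a chip out-of-band can describe it with a local
 * spi_board_info and instantiate it directly.  The "foo-chip"
 * modalias is invented for illustration.
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo-chip",
 *		.max_speed_hz	= 1000000,
 *		.chip_select	= 0,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *	if (!dev)
 *		return -ENODEV;
 */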

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
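
/*
 * Usage sketch (illustrative board file, outside this core): the kind
 * of __initdata descriptor table a board would pass in.  "foo-chip"
 * and bus 1 are invented for illustration.
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo-chip",
 *			.max_speed_hz	= 10000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */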

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
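
/*
 * Usage sketch (hypothetical controller driver, outside this file): a
 * driver whose transfer_one() returned a positive value to signal an
 * asynchronous transfer typically calls this from its completion
 * interrupt handler.  The "foo" names are invented for illustration.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		// ... acknowledge the hardware interrupt ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */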

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt) {
		dev_info(&ctlr->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret)
		dev_warn(&ctlr->dev, "could not stop message queue\n");
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something.  It is used by the spi-mem code to make sure
 * SPI memory operations do not preempt regular SPI transfers that have been
 * queued before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (strcmp(nc->name, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
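
/*
 * Illustrative device tree fragment (assumed binding usage, not part
 * of this file) exercising the properties parsed above; "foo,chip" is
 * an invented compatible string.
 *
 *	&spi1 {
 *		chip@0 {
 *			compatible = "foo,chip";
 *			reg = <0>;
 *			spi-max-frequency = <10000000>;
 *			spi-cpha;
 *			spi-tx-bus-width = <2>;
 *		};
 *	};
 */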

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

#ifdef CONFIG_ACPI
static void acpi_spi_parse_apple_properties(struct spi_device *spi)
{
	struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		spi->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		spi->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	acpi_spi_parse_apple_properties(spi);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t spi_slave_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_controller_get_devdata().
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
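
/*
 * Usage sketch (hypothetical controller driver, outside this file):
 * drivers normally reach this through the spi_alloc_master() wrapper
 * and keep their private state in the trailing allocation.  The "foo"
 * type is invented for illustration.
 *
 *	struct foo_spi {
 *		void __iomem *regs;
 *	};
 *
 *	struct spi_controller *ctlr =
 *		spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	struct foo_spi *priv = spi_controller_get_devdata(ctlr);
 */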

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_controller *ctlr)
{
	return 0;
}
#endif

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller must at least implement one of the ->transfer()
	 * hooks.
	 */
	if (!ctlr->transfer && !ctlr->transfer_one &&
	    !ctlr->transfer_one_message)
		return -EINVAL;

	return 0;
}

/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (!spi_controller_is_slave(ctlr)) {
		status = of_spi_register_master(ctlr);
		if (status)
			return status;
	}

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (ctlr->num_chipselect == 0)
		return -EINVAL;
	/* allocate dynamic bus number using Linux idr */
	if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
	status = device_add(&ctlr->dev);
	if (status < 0) {
		/* free bus id */
		mutex_lock(&board_lock);
		idr_remove(&spi_master_idr, ctlr->bus_num);
		mutex_unlock(&board_lock);
		goto done;
	}
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/* If we're using a queued driver, start the queue */
	if (ctlr->transfer)
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	else {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			/* free bus id */
			mutex_lock(&board_lock);
			idr_remove(&spi_master_idr, ctlr->bus_num);
			mutex_unlock(&board_lock);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}
2227 * devm_spi_register_controller - register managed SPI master or slave
2229 * @dev: device managing SPI controller
2230 * @ctlr: initialized controller, originally from spi_alloc_master() or
2232 * Context: can sleep
2234 * Register a SPI device as with spi_register_controller() which will
2235 * automatically be unregistered and freed.
2237 * Return: zero on success, else a negative error code.
2239 int devm_spi_register_controller(struct device
*dev
,
2240 struct spi_controller
*ctlr
)
2242 struct spi_controller
**ptr
;
2245 ptr
= devres_alloc(devm_spi_unregister
, sizeof(*ptr
), GFP_KERNEL
);
2249 ret
= spi_register_controller(ctlr
);
2252 devres_add(dev
, ptr
);
2259 EXPORT_SYMBOL_GPL(devm_spi_register_controller
);
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;
	int dummy;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
	/* free bus id */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_controller	*ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 *
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);
/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after it is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copy of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacements spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum when to apply this
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer may
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller
	 */
	bad_bits = spi->mode & ~spi->controller->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return ctlr->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)

{
	struct spi_controller *master = spi->controller;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_controller_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, node,
					__spi_of_controller_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);