// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);
static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
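
/*
 * Illustrative sysfs usage of the attribute pair above (the device name
 * "spi0.0" and driver name "spi-foo" are hypothetical):
 *
 *	echo spi-foo > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo "" > /sys/bus/spi/devices/spi0.0/driver_override	(clear it)
 */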
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },			\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {	\
	.attr = { .name = file, .mode = 0444 },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);		\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
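
/*
 * Worked example for the histogram indexing above (illustrative): a
 * transfer with xfer->len = 100 gives fls(100) = 7, so l2len = 6 and the
 * transfer is counted in the "64-127" bucket exposed through sysfs.
 */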
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	ret = sdrv->probe(spi);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}
static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	sdrv->driver.probe = spi_drv_probe;
	sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
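
/*
 * Usage sketch (hypothetical client driver; the "foo" names are not part
 * of this file). Most drivers reach this function through the
 * module_spi_driver() convenience macro:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */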
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}
static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration. Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		status = -ENODEV;
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
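
/*
 * Usage sketch for the alloc/add pair (hypothetical caller; the modalias
 * and chip select values are examples only):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);	(discard without adding)
 */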
/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
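
/*
 * Usage sketch (hypothetical board file): a table like this is typically
 * declared __initdata and registered during arch_initcall:
 *
 *	static struct spi_board_info board_info[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_info, ARRAY_SIZE(board_info));
 */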
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		/*
		 * Honour the SPI_NO_CS flag and invert the enable line, as
		 * active low is default for SPI. Execution paths that handle
		 * polarity inversion in gpiolib (such as device tree) will
		 * enforce active high using the SPI_CS_HIGH resulting in a
		 * double inversion through the code above.
		 */
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod)
				gpiod_set_value_cansleep(spi->cs_gpiod,
							 !enable);
			else
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}
#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
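
/*
 * Worked example for the vmalloc/kmap path above (illustrative numbers):
 * mapping a 10000 byte buffer that starts 512 bytes into a 4096 byte page
 * uses desc_len = 4096 and needs DIV_ROUND_UP(10000 + 512, 4096) = 3
 * scatterlist entries.
 */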
void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	unsigned long long ms = 1;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, xfer->speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= 1000) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, 1000);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 delay = xfer->cs_change_delay;
	u32 unit = xfer->cs_change_delay_unit;
	u32 hz;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay && unit != SPI_DELAY_UNIT_USECS)
		return;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		/* for compatibility use default of 10us */
		if (!delay)
			delay = 10000;
		else
			delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* if there is no effective speed known, then approximate
		 * by underestimating with half the requested hz
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of 10us\n",
			     xfer->cs_change_delay_unit);
		delay = 10000;
	}

	/* now sleep for the requested amount of time */
	_spi_transfer_delay_ns(delay);
}
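
/*
 * Worked example for the SPI_DELAY_UNIT_SCK case above (illustrative):
 * with no known effective rate and xfer->speed_hz = 10 MHz, hz is taken
 * as 5 MHz, so each clock cycle is approximated as
 * DIV_ROUND_UP(1000000000, 5000000) = 200 ns.
 */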
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			_spi_transfer_delay_ns(xfer->delay_usecs * 1000);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
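
/*
 * Usage sketch (hypothetical driver; foo_xfer_done() is not a real
 * helper): a driver whose transfer_one() returned a positive value
 * completes the transfer from its interrupt handler:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		if (foo_xfer_done(ctlr))
 *			spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */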
/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
}
static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	/* In the prepare_messages callback the spi bus has the opportunity to
	 * split a transfer to smaller chunks.
	 * Release the split transfers here since spi_map_msg() is done on the
	 * split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret)
		dev_warn(&ctlr->dev, "could not stop message queue\n");
	return ret;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}
/**
 * spi_flush_queue - Send all pending messages in the queue from the callers'
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. Is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/*
	 * For descriptors associated with the device, polarity inversion is
	 * handled in the gpiolib, so all gpio chip selects are "active high"
	 * in the logical sense, the gpiolib will invert the line if need be.
	 */
	if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
	    ctlr->cs_gpiods[spi->chip_select])
		spi->mode |= SPI_CS_HIGH;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
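
/*
 * Example device tree child node parsed by the function above (the
 * compatible string and values are illustrative only):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <40000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *	};
 */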
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;
	spi->dev.fwnode = of_fwnode_handle(nc);

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller 	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
};
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status) ||
			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
				return -ENODEV;

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* found SPI in _CRS but it points to another controller */
		return AE_OK;

	if (!lookup.max_speed_hz &&
	    !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		|= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	spi->chip_select	= lookup.chip_select;

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}
#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);
static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif
2217 * __spi_alloc_controller - allocate an SPI master or slave controller
2218 * @dev: the controller, possibly using the platform_bus
2219 * @size: how much zeroed driver-private data to allocate; the pointer to this
2220 * memory is in the driver_data field of the returned device, accessible
2221 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2222 * drivers granting DMA access to portions of their private data need to
2223 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2224 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2225 * slave (true) controller
2226 * Context: can sleep
2228 * This call is used only by SPI controller drivers, which are the
2229 * only ones directly touching chip registers. It's how they allocate
2230 * an spi_controller structure, prior to calling spi_register_controller().
2232 * This must be called from context that can sleep.
2234 * The caller is responsible for assigning the bus number and initializing the
2235 * controller's methods before calling spi_register_controller(); and (after
2236 * errors adding the device) calling spi_controller_put() to prevent a memory
2239 * Return: the SPI controller structure on success, else NULL.
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller *ctlr;
	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);

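/*
 * Illustrative sketch (the "foo" driver and its state are assumptions, not
 * part of this file): a controller driver's probe() typically calls this
 * through spi_alloc_master() and keeps its private state in the cacheline
 * aligned tail allocation:
 *
 *	struct foo_spi { void __iomem *regs; };
 *
 *	struct spi_controller *ctlr;
 *	struct foo_spi *fs;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*fs));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	fs = spi_controller_get_devdata(ctlr);
 */
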
static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
	spi_controller_put(*(struct spi_controller **)ctlr);
}

/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver.  Drivers are thus relieved from
 * having to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
						   unsigned int size,
						   bool slave)
{
	struct spi_controller **ptr, *ctlr;

	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	ctlr = __spi_alloc_controller(dev, size, slave);
	if (ctlr) {
		ctlr->devm_allocated = true;
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);

#ifdef CONFIG_OF
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	return 0;
}
#endif

/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;

	nb = gpiod_count(dev, "cs");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* No GPIOs at all is fine, else return the error */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
		}
	}

	return 0;
}

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this is
	 * a valid use case.
	 * If ->mem_ops is NULL, we request that at least one of the
	 * ->transfer_xxx() method be implemented.
	 */
	if (ctlr->mem_ops) {
		if (!ctlr->mem_ops->exec_op)
			return -EINVAL;
	} else if (!ctlr->transfer && !ctlr->transfer_one &&
		   !ctlr->transfer_one_message) {
		return -EINVAL;
	}

	return 0;
}

/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			status;
	int			id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num >= 0) {
		/* devices with a fixed bus num must check-in with the num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			       ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* allocate dynamic bus number using Linux idr */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_slave(ctlr)) {
		if (ctlr->use_gpio_descriptors) {
			status = spi_get_gpio_descs(ctlr);
			if (status)
				goto free_bus_id;
			/*
			 * A controller using GPIO descriptors always
			 * supports SPI_CS_HIGH if need be.
			 */
			ctlr->mode_bits |= SPI_CS_HIGH;
		} else {
			/* Legacy code path for GPIOs from DT */
			status = of_spi_get_gpio_numbers(ctlr);
			if (status)
				goto free_bus_id;
		}
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect) {
		status = -EINVAL;
		goto free_bus_id;
	}

	status = device_add(&ctlr->dev);
	if (status < 0)
		goto free_bus_id;
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue. Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto free_bus_id;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

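/*
 * Illustrative sketch of the registration half of a probe() (the "foo_*"
 * hooks are assumptions): methods and limits are filled in first, and the
 * refcount is dropped on failure as documented above:
 *
 *	ctlr->bus_num = -1;			// request a dynamic bus number
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->set_cs = foo_set_cs;
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 */
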
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev:    device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

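/*
 * Illustrative sketch pairing the two devm variants (assumed "foo" driver):
 * once both calls succeed, neither spi_controller_put() nor
 * spi_unregister_controller() is needed in the error or remove paths:
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*fs));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	// ... fill in methods and limits as usual ...
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */
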
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&spi_add_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	device_del(&ctlr->dev);

	/* Release the last reference on the controller if its driver
	 * has not yet been converted to devm_spi_alloc_master/slave().
	 */
	if (!ctlr->devm_allocated)
		put_device(&ctlr->dev);

	/* free bus id */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

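/*
 * Illustrative sketch of a non-devm remove() path (assumed "foo" driver):
 *
 *	static int foo_spi_remove(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr = platform_get_drvdata(pdev);
 *
 *		spi_unregister_controller(ctlr);
 *		return 0;
 *	}
 */
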
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);

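/*
 * Illustrative sketch of system sleep hooks built on the two helpers above
 * (assumed "foo" driver; clock and register save/restore omitted):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 */
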
static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_controller	*ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);

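/*
 * Illustrative sketch: the returned reference must be dropped with
 * put_device() once the caller is done with the controller:
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		// ... use ctlr ...
 *		put_device(&ctlr->dev);
 *	}
 */
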
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 *
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);

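/*
 * Illustrative sketch of the spi_res life cycle (the "foo_release" hook is
 * an assumption): once added, the resource is torn down automatically by
 * spi_res_release() when the message finishes:
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		// undo whatever was set up for this message
 *	}
 *
 *	void *data = spi_res_alloc(msg->spi, foo_release, len, GFP_KERNEL);
 *
 *	if (data)
 *		spi_res_add(msg, data);
 */
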
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copy of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:   the @spi_message to transform
 * @maxsize:  the maximum when to apply this
 * @gfp: GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer may
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);

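/*
 * Illustrative sketch (the FIFO limit and "foo" names are assumptions): a
 * controller driver with a maximum transfer length can split oversized
 * transfers from its prepare_message() hook:
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 65535,
 *						   GFP_KERNEL);
 *	}
 */
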
/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent that DUAL and QUAD are set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
	/* nothing prevents from working with active-high CS in case if it
	 * is driven by GPIO.
	 */
	if (gpio_is_valid(spi->cs_gpio))
		bad_bits &= ~SPI_CS_HIGH;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	spi_set_cs(spi, false);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

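/*
 * Illustrative sketch of a protocol driver adjusting its device in probe()
 * (the mode and rate values are assumptions for some hypothetical chip):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1 * 1000 * 1000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */
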
/**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
 * @setup: CS setup time in terms of clock count
 * @hold: CS hold time in terms of clock count
 * @inactive_dly: CS inactive delay between transfers in terms of clock count
 */
void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
		       u8 inactive_dly)
{
	if (spi->controller->set_cs_timing)
		spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod ||
					  gpio_is_valid(spi->cs_gpio))) {
		size_t maxsize;
		int ret;

		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (xfer->word_delay_usecs < spi->word_delay_usecs)
			xfer->word_delay_usecs = spi->word_delay_usecs;
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

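/*
 * Illustrative sketch of an asynchronous submission (the "foo" names are
 * assumptions); the completion callback runs in a context that must not
 * sleep:
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		// inspect priv->msg.status, kick off the next step, ...
 *	}
 *
 *	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	status = spi_async(spi, &priv->msg);
 */
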
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);

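/*
 * Illustrative sketch of a simple full-duplex synchronous exchange:
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= sizeof(tx),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);
 */
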
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);

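/*
 * Illustrative sketch of an atomic message sequence under the bus lock
 * (msg1 and msg2 are assumed to be already built):
 *
 *	spi_bus_lock(spi->controller);
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */
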
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

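/*
 * Illustrative sketch of a common register-read idiom built on this helper
 * (the command encoding is an assumption for some hypothetical chip):
 *
 *	u8 cmd = 0x80 | reg;	// hypothetical "read register" opcode
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */
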
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned spi_device device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};

#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};

#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);