/*
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
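
/*
 * For reference, a minimal sketch of the id_table a client driver would
 * supply for this name-based matching; the "foo" names are hypothetical:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-sensor", 0 },
 *		{ "foo-sensor2", 1 },
 *		{ }	// sentinel: spi_match_id() stops at name[0] == '\0'
 *	};
 */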
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
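
/*
 * A minimal sketch of client-driver registration against this bus; the
 * foo_* names are hypothetical.  module_spi_driver() expands to module
 * init/exit stubs that call spi_register_driver()/spi_unregister_driver():
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */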
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
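
/*
 * A minimal sketch of the alloc/add pairing described above, e.g. from an
 * adapter driver that learns about a device out-of-band (hypothetical
 * "foo-dev" name, error handling abbreviated):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo-dev", sizeof(spi->modalias));
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);	// discard without adding
 */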
/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
			      struct sg_table *sgt, void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_controller *ctlr,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
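
/*
 * A sketch of how a controller driver pairs transfer_one() with
 * spi_finalize_current_transfer(); the foo_* names are hypothetical.
 * Returning a positive value from transfer_one() tells the core above to
 * wait on ctlr->xfer_completion, which the driver's IRQ handler completes:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_transfer(ctlr, xfer);	// kick off the hardware
 *		return 1;			// transfer still in progress
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */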
/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt) {
		dev_info(&ctlr->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret)
		dev_warn(&ctlr->dev, "could not stop message queue\n");
	return ret;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued onto the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (strcmp(nc->name, "slave")) {
			dev_err(&ctlr->dev, "%s is not called 'slave'\n",
				nc->full_name);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %s\n",
				 nc->full_name);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#if IS_ENABLED(CONFIG_ACPI)
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t spi_slave_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}
static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_controller_get_devdata().
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
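
/*
 * A sketch of the usual allocation pattern in a controller driver's
 * probe() (the foo_spi type is hypothetical); the driver-private data
 * lives in the same allocation, right behind the spi_controller:
 *
 *	struct spi_controller *ctlr;
 *	struct foo_spi *fs;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*fs));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	fs = spi_controller_get_devdata(ctlr);
 */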
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_controller *ctlr)
{
	return 0;
}
#endif
/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	if (!spi_controller_is_slave(ctlr)) {
		status = of_spi_register_master(ctlr);
		if (status)
			return status;
	}

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (ctlr->num_chipselect == 0)
		return -EINVAL;

	if ((ctlr->bus_num < 0) && ctlr->dev.of_node)
		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (ctlr->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		ctlr->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
	status = device_add(&ctlr->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered %s %s%s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev), dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (ctlr->transfer)
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	else {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
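
/*
 * Continuing the (hypothetical) controller probe() sketch from above,
 * the final step hooks the controller up to this bus glue:
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 *	status = devm_spi_register_controller(&pdev->dev, ctlr);
 *	if (status)
 *		return status;
 */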
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev:    device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered on driver detach.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	int dummy;

	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_controller	*ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
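
/*
 * A short (hypothetical) use of this lookup, e.g. by code instantiating a
 * device on an already-registered bus; the caller must drop the reference
 * it received:
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(1);
 *
 *	if (ctlr) {
 *		// ...use ctlr, e.g. with spi_new_device()...
 *		spi_controller_put(ctlr);	// release the reference
 *	}
 */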
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
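
/*
 * Illustrative sketch (not part of this file): how a helper can attach
 * private state to a message so it is undone automatically when the
 * message is finalized.  "struct foo_state", foo_state_release() and
 * foo_attach_state() are hypothetical.
 *
 *	static void foo_state_release(struct spi_controller *ctlr,
 *				      struct spi_message *msg, void *res)
 *	{
 *		... undo whatever was set up when the state was attached ...
 *	}
 *
 *	static int foo_attach_state(struct spi_message *msg)
 *	{
 *		struct foo_state *state;
 *
 *		state = spi_res_alloc(msg->spi, foo_state_release,
 *				      sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return -ENOMEM;
 *		spi_res_add(msg, state);
 *		return 0;
 *	}
 *
 * spi_res_release() then invokes foo_state_release() and frees the
 * resource once the message completes.
 */
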
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replaced_transfers structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copy of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacement spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length a single transfer may have; any transfer
 *           longer than this gets split
 * @gfp:     GFP allocation flags
 *
 * Return: zero on success, else a negative error code.
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer may
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
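
/*
 * Illustrative sketch (not part of this file): a controller driver whose
 * DMA engine is limited to FOO_MAX_DMA_LEN bytes (a hypothetical constant)
 * can split oversized transfers in its prepare_message() hook.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg,
 *						   FOO_MAX_DMA_LEN,
 *						   GFP_KERNEL);
 *	}
 */
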
/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent DUAL and QUAD from being set at the
	 * same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: cannot select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller
	 */
	bad_bits = spi->mode & ~spi->controller->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
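
/*
 * Illustrative sketch (not part of this file): a protocol driver adjusting
 * its device's settings at probe time; the values shown are arbitrary
 * examples.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */
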
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
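
/*
 * Illustrative sketch (not part of this file): submitting a message from a
 * context that cannot sleep and finishing the work in the completion
 * callback.  foo_done() and "struct foo" are hypothetical; foo is assumed
 * to own the message and transfer so they stay valid until completion.
 *
 *	static void foo_done(void *context)
 *	{
 *		struct foo *foo = context;
 *
 *		... inspect foo->msg.status, schedule follow-up work ...
 *	}
 *
 *	spi_message_init_with_transfers(&foo->msg, &foo->xfer, 1);
 *	foo->msg.complete = foo_done;
 *	foo->msg.context = foo;
 *	ret = spi_async(spi, &foo->msg);
 */
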
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_controller *master = spi->controller;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
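
/*
 * Illustrative sketch (not part of this file), assuming the field names of
 * struct spi_flash_read_message (read_opcode, addr_width, dummy_bytes): a
 * SPI-NOR driver issuing a plain 0x03 read through a controller with an
 * accelerated read path might fill the message like this.
 *
 *	struct spi_flash_read_message msg = { };
 *
 *	msg.buf = dst;
 *	msg.from = offset;
 *	msg.len = len;
 *	msg.read_opcode = 0x03;
 *	msg.addr_width = 3;
 *	msg.dummy_bytes = 0;
 *	msg.opcode_nbits = SPI_NBITS_SINGLE;
 *	msg.addr_nbits = SPI_NBITS_SINGLE;
 *	msg.data_nbits = SPI_NBITS_SINGLE;
 *
 *	ret = spi_flash_read(spi, &msg);
 */
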
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
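
/*
 * Illustrative sketch (not part of this file): a blocking one-byte command
 * and one-byte response exchange.  For brevity, cmd and val are shown on
 * the caller's stack; real drivers should use DMA-safe buffers.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = &cmd, .len = 1 },
 *		{ .rx_buf = &val, .len = 1 },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, 2);
 *	ret = spi_sync(spi, &msg);
 */
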
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
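
/*
 * Illustrative sketch (not part of this file): keeping two messages back
 * to back on the bus with no other traffic in between.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */
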
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
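
/*
 * Illustrative sketch (not part of this file): reading a two-byte ID
 * register; FOO_REG_ID is a hypothetical register address.
 *
 *	u8 cmd = FOO_REG_ID;
 *	u8 id[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */
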
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_controller_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi controllers are not using spi_bus, so we find them a different way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, node,
					__spi_of_controller_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif /* IS_ENABLED(CONFIG_ACPI) */

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);
);