/*
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
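/*
 * For illustration (not compiled here): SPI_STATISTICS_SHOW(messages, "%lu")
 * expands, via SPI_STATISTICS_SHOW_NAME(), to roughly:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *						    char *buf)
 *	{
 *		unsigned long flags;
 *		ssize_t len;
 *		spin_lock_irqsave(&stat->lock, flags);
 *		len = sprintf(buf, "%lu", stat->messages);
 *		spin_unlock_irqrestore(&stat->lock, flags);
 *		return len;
 *	}
 *
 * plus the per-master and per-device attribute wrappers generated by
 * SPI_STATISTICS_ATTRS(), which expose the counter as a "messages" file in
 * each device's "statistics" sysfs group.
 */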
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
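/*
 * Bucket selection above relies on fls(): for example, fls(1) == 1 maps a
 * 1-byte transfer to transfer_bytes_histo[0] (the "0-1" file), fls() == 5
 * maps lengths 16..31 to transfer_bytes_histo[4] ("16-31"), and anything
 * with fls() >= SPI_STATISTICS_HISTO_SIZE lands in the final "65536+"
 * bucket thanks to the min() clamp.
 */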
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
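/*
 * A hypothetical client driver (names invented for illustration) provides
 * the id_table consumed by spi_match_id() like so:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-sensor", 0 },
 *		{ "foo-sensor-hd", 1 },
 *		{ }	// table must be zero-terminated
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 * spi_match_id() walks the array until the empty sentinel entry, comparing
 * each name against the device's modalias.
 */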
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
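/*
 * Typical use, sketched with an invented driver: the spi_register_driver()
 * macro passes THIS_MODULE into this function, and module_spi_driver()
 * shrinks the boilerplate further:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver	= { .name = "foo-sensor" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */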
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
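/*
 * Sketch of the intended calling pattern (error handling shortened, values
 * and names hypothetical):
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));
 *	spi->chip_select = 2;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);	// discard without registering
 */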
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}
static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
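/*
 * Example of the board-file usage described above (values invented):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */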
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}
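/*
 * Note the double inversion above: "enable" means "assert chip select".
 * For an active-low device (the default) asserting drives the line low, so
 * the GPIO is written with !enable; for SPI_CS_HIGH devices, enable is
 * flipped first and the same !enable write drives the line high on assert.
 */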
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = PAGE_SIZE;
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = master->max_dma_len;
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}
static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
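/*
 * A controller driver using transfer_one() typically calls this from its
 * completion interrupt; a hypothetical sketch:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		// ... acknowledge hardware, drain FIFOs ...
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */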
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}
static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
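/*
 * Example use (hypothetical), remembering the reference noted above:
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *	if (master) {
 *		// ... e.g. spi_new_device(master, &chip) ...
 *		spi_master_put(master);	// drop class_find_device reference
 *	}
 */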
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_master to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 *
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @master:  the @spi_master
 * @message: the @spi_message
 */
void spi_res_release(struct spi_master *master,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(master, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);

/*-------------------------------------------------------------------------*/
/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* Check mode so that DUAL and QUAD cannot both be set at the
	 * same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden.
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
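/*
 * Typical protocol-driver use, per the description above (values invented):
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 5000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;	// e.g. controller lacks a requested mode bit
 */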
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
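
/* Worked example of the word-size rule above: xfer->len counts bytes, so a
 * transfer using 9..16 bit words (w_size == 2) must have an even byte count.
 * Sketch only; the buffer name is hypothetical:
 */
#if 0
	struct spi_transfer t = {
		.tx_buf		= data,
		.len		= 10,	/* 5 words * 2 bytes/word; len = 9 would get -EINVAL */
		.bits_per_word	= 12,	/* 9..16 bits -> two bytes per word on the wire */
	};
#endif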
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
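
/* For illustration only: an asynchronous submit.  The completion callback
 * runs in a context that can't sleep, so it usually just records status and
 * wakes a waiter.  The names below are hypothetical, not part of this file.
 */
#if 0
static void foo_msg_complete(void *context)
{
	complete(context);	/* message->status now holds the result */
}

static int foo_start_io(struct spi_device *spi, struct spi_message *m,
			struct completion *done)
{
	m->complete = foo_msg_complete;
	m->context = done;
	return spi_async(spi, m);	/* returns before the I/O finishes */
}
#endif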
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */
static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
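
/* For illustration only: the classic one-transfer synchronous message.  The
 * function name is hypothetical; buffers handed to spi_sync() should be
 * DMA-safe (e.g. kmalloc'd), not on the stack.
 */
#if 0
static int foo_read_block(struct spi_device *spi, void *rx, size_t len)
{
	struct spi_transfer	t = {
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* sleeps until the message completes */
}
#endif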
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
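
/* For illustration only: a driver that must keep other clients off the bus
 * across a multi-message sequence pairs spi_bus_lock() with the _locked
 * transfer calls.  The function name and messages below are hypothetical.
 */
#if 0
static int foo_atomic_sequence(struct spi_device *spi,
			       struct spi_message *m1, struct spi_message *m2)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);	/* other spi_sync()/spi_async() callers now wait */
	ret = spi_sync_locked(spi, m1);
	if (!ret)
		ret = spi_sync_locked(spi, m2);
	spi_bus_unlock(master);

	return ret;
}
#endif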
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
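
/* For illustration only: reading one register of a hypothetical chip; a
 * one-byte command is written, then one byte is read back.  Both buffers may
 * live on the stack, since the data is bounced through the DMA-safe scratch
 * buffer above.  Name and command encoding are made up.
 */
#if 0
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;	/* hypothetical "read register" encoding */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif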
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}
static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not using spi_bus, so we find it with another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}
/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);