/*
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
    struct spi_device *spi = to_spi_device(dev);

    /* spi masters may cleanup for released devices */
    if (spi->master->cleanup)
        spi->master->cleanup(spi);

    spi_master_put(spi->master);
    kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
    const struct spi_device *spi = to_spi_device(dev);
    int len;

    len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
    if (len != -ENODEV)
        return len;

    return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_master_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
    struct spi_master *master = container_of(dev, \
                                              struct spi_master, dev); \
    return spi_statistics_##field##_show(&master->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_master_##field = { \
    .attr = { .name = file, .mode = S_IRUGO }, \
    .show = spi_master_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
    struct spi_device *spi = to_spi_device(dev); \
    return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
    .attr = { .name = file, .mode = S_IRUGO }, \
    .show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf) \
{ \
    unsigned long flags; \
    ssize_t len; \
    spin_lock_irqsave(&stat->lock, flags); \
    len = sprintf(buf, format_string, stat->field); \
    spin_unlock_irqrestore(&stat->lock, flags); \
    return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
    SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
                             field, format_string)
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
    SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
                             "transfer_bytes_histo_" number, \
                             transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
    &dev_attr_modalias.attr,
    NULL,
};

static const struct attribute_group spi_dev_group = {
    .attrs = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
    &dev_attr_spi_device_messages.attr,
    &dev_attr_spi_device_transfers.attr,
    &dev_attr_spi_device_errors.attr,
    &dev_attr_spi_device_timedout.attr,
    &dev_attr_spi_device_spi_sync.attr,
    &dev_attr_spi_device_spi_sync_immediate.attr,
    &dev_attr_spi_device_spi_async.attr,
    &dev_attr_spi_device_bytes.attr,
    &dev_attr_spi_device_bytes_rx.attr,
    &dev_attr_spi_device_bytes_tx.attr,
    &dev_attr_spi_device_transfer_bytes_histo0.attr,
    &dev_attr_spi_device_transfer_bytes_histo1.attr,
    &dev_attr_spi_device_transfer_bytes_histo2.attr,
    &dev_attr_spi_device_transfer_bytes_histo3.attr,
    &dev_attr_spi_device_transfer_bytes_histo4.attr,
    &dev_attr_spi_device_transfer_bytes_histo5.attr,
    &dev_attr_spi_device_transfer_bytes_histo6.attr,
    &dev_attr_spi_device_transfer_bytes_histo7.attr,
    &dev_attr_spi_device_transfer_bytes_histo8.attr,
    &dev_attr_spi_device_transfer_bytes_histo9.attr,
    &dev_attr_spi_device_transfer_bytes_histo10.attr,
    &dev_attr_spi_device_transfer_bytes_histo11.attr,
    &dev_attr_spi_device_transfer_bytes_histo12.attr,
    &dev_attr_spi_device_transfer_bytes_histo13.attr,
    &dev_attr_spi_device_transfer_bytes_histo14.attr,
    &dev_attr_spi_device_transfer_bytes_histo15.attr,
    &dev_attr_spi_device_transfer_bytes_histo16.attr,
    &dev_attr_spi_device_transfers_split_maxsize.attr,
    NULL,
};

static const struct attribute_group spi_device_statistics_group = {
    .name = "statistics",
    .attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
    &spi_dev_group,
    &spi_device_statistics_group,
    NULL,
};
static struct attribute *spi_master_statistics_attrs[] = {
    &dev_attr_spi_master_messages.attr,
    &dev_attr_spi_master_transfers.attr,
    &dev_attr_spi_master_errors.attr,
    &dev_attr_spi_master_timedout.attr,
    &dev_attr_spi_master_spi_sync.attr,
    &dev_attr_spi_master_spi_sync_immediate.attr,
    &dev_attr_spi_master_spi_async.attr,
    &dev_attr_spi_master_bytes.attr,
    &dev_attr_spi_master_bytes_rx.attr,
    &dev_attr_spi_master_bytes_tx.attr,
    &dev_attr_spi_master_transfer_bytes_histo0.attr,
    &dev_attr_spi_master_transfer_bytes_histo1.attr,
    &dev_attr_spi_master_transfer_bytes_histo2.attr,
    &dev_attr_spi_master_transfer_bytes_histo3.attr,
    &dev_attr_spi_master_transfer_bytes_histo4.attr,
    &dev_attr_spi_master_transfer_bytes_histo5.attr,
    &dev_attr_spi_master_transfer_bytes_histo6.attr,
    &dev_attr_spi_master_transfer_bytes_histo7.attr,
    &dev_attr_spi_master_transfer_bytes_histo8.attr,
    &dev_attr_spi_master_transfer_bytes_histo9.attr,
    &dev_attr_spi_master_transfer_bytes_histo10.attr,
    &dev_attr_spi_master_transfer_bytes_histo11.attr,
    &dev_attr_spi_master_transfer_bytes_histo12.attr,
    &dev_attr_spi_master_transfer_bytes_histo13.attr,
    &dev_attr_spi_master_transfer_bytes_histo14.attr,
    &dev_attr_spi_master_transfer_bytes_histo15.attr,
    &dev_attr_spi_master_transfer_bytes_histo16.attr,
    &dev_attr_spi_master_transfers_split_maxsize.attr,
    NULL,
};

static const struct attribute_group spi_master_statistics_group = {
    .name = "statistics",
    .attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
    &spi_master_statistics_group,
    NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
    unsigned long flags;
    int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

    if (l2len < 0)
        l2len = 0;

    spin_lock_irqsave(&stats->lock, flags);

    stats->transfers++;
    stats->transfer_bytes_histo[l2len]++;

    stats->bytes += xfer->len;
    if ((xfer->tx_buf) &&
        (xfer->tx_buf != master->dummy_tx))
        stats->bytes_tx += xfer->len;
    if ((xfer->rx_buf) &&
        (xfer->rx_buf != master->dummy_rx))
        stats->bytes_rx += xfer->len;

    spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
    while (id->name[0]) {
        if (!strcmp(sdev->modalias, id->name))
            return id;
        id++;
    }
    return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
    const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

    return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
    const struct spi_device *spi = to_spi_device(dev);
    const struct spi_driver *sdrv = to_spi_driver(drv);

    /* Attempt an OF style match */
    if (of_driver_match_device(dev, drv))
        return 1;

    /* Then try ACPI */
    if (acpi_driver_match_device(dev, drv))
        return 1;

    if (sdrv->id_table)
        return !!spi_match_id(sdrv->id_table, spi);

    return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
    const struct spi_device *spi = to_spi_device(dev);
    int rc;

    rc = acpi_device_uevent_modalias(dev, env);
    if (rc != -ENODEV)
        return rc;

    add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
    return 0;
}

struct bus_type spi_bus_type = {
    .name        = "spi",
    .dev_groups  = spi_dev_groups,
    .match       = spi_match_device,
    .uevent      = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);
    struct spi_device *spi = to_spi_device(dev);
    int ret;

    ret = of_clk_set_defaults(dev->of_node, false);
    if (ret)
        return ret;

    if (dev->of_node) {
        spi->irq = of_irq_get(dev->of_node, 0);
        if (spi->irq == -EPROBE_DEFER)
            return -EPROBE_DEFER;
        if (spi->irq < 0)
            spi->irq = 0;
    }

    ret = dev_pm_domain_attach(dev, true);
    if (ret != -EPROBE_DEFER) {
        ret = sdrv->probe(spi);
        if (ret)
            dev_pm_domain_detach(dev, true);
    }

    return ret;
}
static int spi_drv_remove(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);
    int ret;

    ret = sdrv->remove(to_spi_device(dev));
    dev_pm_domain_detach(dev, true);

    return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);

    sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
    sdrv->driver.owner = owner;
    sdrv->driver.bus = &spi_bus_type;
    if (sdrv->probe)
        sdrv->driver.probe = spi_drv_probe;
    if (sdrv->remove)
        sdrv->driver.remove = spi_drv_remove;
    if (sdrv->shutdown)
        sdrv->driver.shutdown = spi_drv_shutdown;
    return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
    struct list_head    list;
    struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
    struct spi_device *spi;

    if (!spi_master_get(master))
        return NULL;

    spi = kzalloc(sizeof(*spi), GFP_KERNEL);
    if (!spi) {
        spi_master_put(master);
        return NULL;
    }

    spi->master = master;
    spi->dev.parent = &master->dev;
    spi->dev.bus = &spi_bus_type;
    spi->dev.release = spidev_release;
    spi->cs_gpio = -ENOENT;

    spin_lock_init(&spi->statistics.lock);

    device_initialize(&spi->dev);
    return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
    struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

    if (adev) {
        dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
        return;
    }

    dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                 spi->chip_select);
}
static int spi_dev_check(struct device *dev, void *data)
{
    struct spi_device *spi = to_spi_device(dev);
    struct spi_device *new_spi = data;

    if (spi->master == new_spi->master &&
        spi->chip_select == new_spi->chip_select)
        return -EBUSY;
    return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
    static DEFINE_MUTEX(spi_add_lock);
    struct spi_master *master = spi->master;
    struct device *dev = master->dev.parent;
    int status;

    /* Chipselects are numbered 0..max; validate. */
    if (spi->chip_select >= master->num_chipselect) {
        dev_err(dev, "cs%d >= max %d\n",
            spi->chip_select,
            master->num_chipselect);
        return -EINVAL;
    }

    /* Set the bus ID string */
    spi_dev_set_name(spi);

    /* We need to make sure there's no other device with this
     * chipselect **BEFORE** we call setup(), else we'll trash
     * its configuration.  Lock against concurrent add() calls.
     */
    mutex_lock(&spi_add_lock);

    status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
    if (status) {
        dev_err(dev, "chipselect %d already in use\n",
                spi->chip_select);
        goto done;
    }

    if (master->cs_gpios)
        spi->cs_gpio = master->cs_gpios[spi->chip_select];

    /* Drivers may modify this initial i/o setup, but will
     * normally rely on the device being setup.  Devices
     * using SPI_CS_HIGH can't coexist well otherwise...
     */
    status = spi_setup(spi);
    if (status < 0) {
        dev_err(dev, "can't setup %s, status %d\n",
                dev_name(&spi->dev), status);
        goto done;
    }

    /* Device may be bound to an active driver when this returns */
    status = device_add(&spi->dev);
    if (status < 0)
        dev_err(dev, "can't add %s, status %d\n",
                dev_name(&spi->dev), status);
    else
        dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
    mutex_unlock(&spi_add_lock);
    return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
    struct spi_device *proxy;
    int status;

    /* NOTE:  caller did any chip->bus_num checks necessary.
     *
     * Also, unless we change the return value convention to use
     * error-or-pointer (not NULL-or-pointer), troubleshootability
     * suggests syslogged diagnostics are best here (ugh).
     */

    proxy = spi_alloc_device(master);
    if (!proxy)
        return NULL;

    WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

    proxy->chip_select = chip->chip_select;
    proxy->max_speed_hz = chip->max_speed_hz;
    proxy->mode = chip->mode;
    proxy->irq = chip->irq;
    strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
    proxy->dev.platform_data = (void *) chip->platform_data;
    proxy->controller_data = chip->controller_data;
    proxy->controller_state = NULL;

    if (chip->properties) {
        status = device_add_properties(&proxy->dev, chip->properties);
        if (status) {
            dev_err(&master->dev,
                "failed to add properties to '%s': %d\n",
                chip->modalias, status);
            goto err_dev_put;
        }
    }

    status = spi_add_device(proxy);
    if (status < 0)
        goto err_remove_props;

    return proxy;

err_remove_props:
    if (chip->properties)
        device_remove_properties(&proxy->dev);
err_dev_put:
    spi_dev_put(proxy);
    return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
    if (!spi)
        return;

    if (spi->dev.of_node) {
        of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
        of_node_put(spi->dev.of_node);
    }
    if (ACPI_COMPANION(&spi->dev))
        acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
    device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_master_to_boardinfo(struct spi_master *master,
                                          struct spi_board_info *bi)
{
    struct spi_device *dev;

    if (master->bus_num != bi->bus_num)
        return;

    dev = spi_new_device(master, bi);
    if (!dev)
        dev_err(master->dev.parent, "can't create new device for %s\n",
            bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
    struct boardinfo *bi;
    int i;

    if (!n)
        return 0;

    bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
    if (!bi)
        return -ENOMEM;

    for (i = 0; i < n; i++, bi++, info++) {
        struct spi_master *master;

        memcpy(&bi->board_info, info, sizeof(*info));
        if (info->properties) {
            bi->board_info.properties =
                property_entries_dup(info->properties);
            if (IS_ERR(bi->board_info.properties))
                return PTR_ERR(bi->board_info.properties);
        }

        mutex_lock(&board_lock);
        list_add_tail(&bi->list, &board_list);
        list_for_each_entry(master, &spi_master_list, list)
            spi_match_master_to_boardinfo(master, &bi->board_info);
        mutex_unlock(&board_lock);
    }

    return 0;
}
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
    if (spi->mode & SPI_CS_HIGH)
        enable = !enable;

    if (gpio_is_valid(spi->cs_gpio)) {
        gpio_set_value(spi->cs_gpio, !enable);
        /* Some SPI masters need both GPIO CS & slave_select */
        if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
            spi->master->set_cs)
            spi->master->set_cs(spi, !enable);
    } else if (spi->master->set_cs) {
        spi->master->set_cs(spi, !enable);
    }
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
    const bool vmalloced_buf = is_vmalloc_addr(buf);
    unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
    const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                           (unsigned long)buf < (PKMAP_BASE +
                            (LAST_PKMAP * PAGE_SIZE)));
#else
    const bool kmap_buf = false;
#endif
    int desc_len;
    int sgs;
    struct page *vm_page;
    struct scatterlist *sg;
    void *sg_buf;
    size_t min;
    int i, ret;

    if (vmalloced_buf || kmap_buf) {
        desc_len = min_t(int, max_seg_size, PAGE_SIZE);
        sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
    } else if (virt_addr_valid(buf)) {
        desc_len = min_t(int, max_seg_size, master->max_dma_len);
        sgs = DIV_ROUND_UP(len, desc_len);
    } else {
        return -EINVAL;
    }

    ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
    if (ret != 0)
        return ret;

    sg = &sgt->sgl[0];
    for (i = 0; i < sgs; i++) {

        if (vmalloced_buf || kmap_buf) {
            min = min_t(size_t,
                        len, desc_len - offset_in_page(buf));
            if (vmalloced_buf)
                vm_page = vmalloc_to_page(buf);
            else
                vm_page = kmap_to_page(buf);
            if (!vm_page) {
                sg_free_table(sgt);
                return -ENOMEM;
            }
            sg_set_page(sg, vm_page,
                        min, offset_in_page(buf));
        } else {
            min = min_t(size_t, len, desc_len);
            sg_buf = buf;
            sg_set_buf(sg, sg_buf, min);
        }

        buf += min;
        len -= min;
        sg = sg_next(sg);
    }

    ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
    if (!ret)
        ret = -ENOMEM;
    if (ret < 0) {
        sg_free_table(sgt);
        return ret;
    }

    sgt->nents = ret;

    return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
    if (sgt->orig_nents) {
        dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
        sg_free_table(sgt);
    }
}
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
    struct device *tx_dev, *rx_dev;
    struct spi_transfer *xfer;
    int ret;

    if (!master->can_dma)
        return 0;

    if (master->dma_tx)
        tx_dev = master->dma_tx->device->dev;
    else
        tx_dev = master->dev.parent;

    if (master->dma_rx)
        rx_dev = master->dma_rx->device->dev;
    else
        rx_dev = master->dev.parent;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        if (!master->can_dma(master, msg->spi, xfer))
            continue;

        if (xfer->tx_buf != NULL) {
            ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                              (void *)xfer->tx_buf, xfer->len,
                              DMA_TO_DEVICE);
            if (ret != 0)
                return ret;
        }

        if (xfer->rx_buf != NULL) {
            ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                              xfer->rx_buf, xfer->len,
                              DMA_FROM_DEVICE);
            if (ret != 0) {
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                              DMA_TO_DEVICE);
                return ret;
            }
        }
    }

    master->cur_msg_mapped = true;

    return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
    struct spi_transfer *xfer;
    struct device *tx_dev, *rx_dev;

    if (!master->cur_msg_mapped || !master->can_dma)
        return 0;

    if (master->dma_tx)
        tx_dev = master->dma_tx->device->dev;
    else
        tx_dev = master->dev.parent;

    if (master->dma_rx)
        rx_dev = master->dma_rx->device->dev;
    else
        rx_dev = master->dev.parent;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        if (!master->can_dma(master, msg->spi, xfer))
            continue;

        spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
        spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
    }

    return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
                              struct device *dev, struct sg_table *sgt,
                              void *buf, size_t len,
                              enum dma_data_direction dir)
{
    return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
                                 struct device *dev, struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
    return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
    return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
    struct spi_transfer *xfer;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        /*
         * Restore the original value of tx_buf or rx_buf if they are
         * NULL.
         */
        if (xfer->tx_buf == master->dummy_tx)
            xfer->tx_buf = NULL;
        if (xfer->rx_buf == master->dummy_rx)
            xfer->rx_buf = NULL;
    }

    return __spi_unmap_msg(master, msg);
}
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
    struct spi_transfer *xfer;
    void *tmp;
    unsigned int max_tx, max_rx;

    if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
        max_tx = 0;
        max_rx = 0;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
            if ((master->flags & SPI_MASTER_MUST_TX) &&
                !xfer->tx_buf)
                max_tx = max(xfer->len, max_tx);
            if ((master->flags & SPI_MASTER_MUST_RX) &&
                !xfer->rx_buf)
                max_rx = max(xfer->len, max_rx);
        }

        if (max_tx) {
            tmp = krealloc(master->dummy_tx, max_tx,
                           GFP_KERNEL | GFP_DMA);
            if (!tmp)
                return -ENOMEM;
            master->dummy_tx = tmp;
            memset(tmp, 0, max_tx);
        }

        if (max_rx) {
            tmp = krealloc(master->dummy_rx, max_rx,
                           GFP_KERNEL | GFP_DMA);
            if (!tmp)
                return -ENOMEM;
            master->dummy_rx = tmp;
        }

        if (max_tx || max_rx) {
            list_for_each_entry(xfer, &msg->transfers,
                                transfer_list) {
                if (!xfer->tx_buf)
                    xfer->tx_buf = master->dummy_tx;
                if (!xfer->rx_buf)
                    xfer->rx_buf = master->dummy_rx;
            }
        }
    }

    return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
    struct spi_transfer *xfer;
    bool keep_cs = false;
    int ret = 0;
    unsigned long long ms = 1;
    struct spi_statistics *statm = &master->statistics;
    struct spi_statistics *stats = &msg->spi->statistics;

    spi_set_cs(msg->spi, true);

    SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
    SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        trace_spi_transfer_start(msg, xfer);

        spi_statistics_add_transfer_stats(statm, xfer, master);
        spi_statistics_add_transfer_stats(stats, xfer, master);

        if (xfer->tx_buf || xfer->rx_buf) {
            reinit_completion(&master->xfer_completion);

            ret = master->transfer_one(master, msg->spi, xfer);
            if (ret < 0) {
                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                               errors);
                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                               errors);
                dev_err(&msg->spi->dev,
                    "SPI transfer failed: %d\n", ret);
                goto out;
            }

            if (ret > 0) {
                ret = 0;
                ms = 8LL * 1000LL * xfer->len;
                do_div(ms, xfer->speed_hz);
                ms += ms + 200; /* some tolerance */

                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                 msecs_to_jiffies(ms));
            }

            if (ms == 0) {
                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                               timedout);
                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                               timedout);
                dev_err(&msg->spi->dev,
                    "SPI transfer timed out\n");
                msg->status = -ETIMEDOUT;
            }
        } else {
            if (xfer->len)
                dev_err(&msg->spi->dev,
                    "Bufferless transfer has length %u\n",
                    xfer->len);
        }

        trace_spi_transfer_stop(msg, xfer);

        if (msg->status != -EINPROGRESS)
            goto out;

        if (xfer->delay_usecs) {
            u16 us = xfer->delay_usecs;

            if (us <= 10)
                udelay(us);
            else
                usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }

        if (xfer->cs_change) {
            if (list_is_last(&xfer->transfer_list,
                             &msg->transfers)) {
                keep_cs = true;
            } else {
                spi_set_cs(msg->spi, false);
                udelay(10);
                spi_set_cs(msg->spi, true);
            }
        }

        msg->actual_length += xfer->len;
    }

out:
    if (ret != 0 || !keep_cs)
        spi_set_cs(msg->spi, false);

    if (msg->status == -EINPROGRESS)
        msg->status = ret;

    if (msg->status && master->handle_err)
        master->handle_err(master, msg);

    spi_res_release(master, msg);

    spi_finalize_current_message(master);

    return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
    complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
    unsigned long flags;
    bool was_busy = false;
    int ret;

    /* Lock queue */
    spin_lock_irqsave(&master->queue_lock, flags);

    /* Make sure we are not already running a message */
    if (master->cur_msg) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* If another context is idling the device then defer */
    if (master->idling) {
        kthread_queue_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Check if the queue is idle */
    if (list_empty(&master->queue) || !master->running) {
        if (!master->busy) {
            spin_unlock_irqrestore(&master->queue_lock, flags);
            return;
        }

        /* Only do teardown in the thread */
        if (!in_kthread) {
            kthread_queue_work(&master->kworker,
                               &master->pump_messages);
            spin_unlock_irqrestore(&master->queue_lock, flags);
            return;
        }

        master->busy = false;
        master->idling = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        kfree(master->dummy_rx);
        master->dummy_rx = NULL;
        kfree(master->dummy_tx);
        master->dummy_tx = NULL;
        if (master->unprepare_transfer_hardware &&
            master->unprepare_transfer_hardware(master))
            dev_err(&master->dev,
                "failed to unprepare transfer hardware\n");
        if (master->auto_runtime_pm) {
            pm_runtime_mark_last_busy(master->dev.parent);
            pm_runtime_put_autosuspend(master->dev.parent);
        }
        trace_spi_master_idle(master);

        spin_lock_irqsave(&master->queue_lock, flags);
        master->idling = false;
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Extract head of queue */
    master->cur_msg =
        list_first_entry(&master->queue, struct spi_message, queue);

    list_del_init(&master->cur_msg->queue);
    if (master->busy)
        was_busy = true;
    else
        master->busy = true;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    mutex_lock(&master->io_mutex);

    if (!was_busy && master->auto_runtime_pm) {
        ret = pm_runtime_get_sync(master->dev.parent);
        if (ret < 0) {
            dev_err(&master->dev, "Failed to power device: %d\n",
                ret);
            mutex_unlock(&master->io_mutex);
            return;
        }
    }

    if (!was_busy)
        trace_spi_master_busy(master);

    if (!was_busy && master->prepare_transfer_hardware) {
        ret = master->prepare_transfer_hardware(master);
        if (ret) {
            dev_err(&master->dev,
                "failed to prepare transfer hardware\n");

            if (master->auto_runtime_pm)
                pm_runtime_put(master->dev.parent);
            mutex_unlock(&master->io_mutex);
            return;
        }
    }

    trace_spi_message_start(master->cur_msg);

    if (master->prepare_message) {
        ret = master->prepare_message(master, master->cur_msg);
        if (ret) {
            dev_err(&master->dev,
                "failed to prepare message: %d\n", ret);
            master->cur_msg->status = ret;
            spi_finalize_current_message(master);
            goto out;
        }
        master->cur_msg_prepared = true;
    }

    ret = spi_map_msg(master, master->cur_msg);
    if (ret) {
        master->cur_msg->status = ret;
        spi_finalize_current_message(master);
        goto out;
    }

    ret = master->transfer_one_message(master, master->cur_msg);
    if (ret) {
        dev_err(&master->dev,
            "failed to transfer one message from queue\n");
        goto out;
    }

out:
    mutex_unlock(&master->io_mutex);

    /* Prod the scheduler in case transfer_one() was busy waiting */
    if (!ret)
        cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
    struct spi_master *master =
        container_of(work, struct spi_master, pump_messages);

    __spi_pump_messages(master, true);
}
static int spi_init_queue(struct spi_master *master)
{
    struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

    master->running = false;
    master->busy = false;

    kthread_init_worker(&master->kworker);
    master->kworker_task = kthread_run(kthread_worker_fn,
                                       &master->kworker, "%s",
                                       dev_name(&master->dev));
    if (IS_ERR(master->kworker_task)) {
        dev_err(&master->dev, "failed to create message pump task\n");
        return PTR_ERR(master->kworker_task);
    }
    kthread_init_work(&master->pump_messages, spi_pump_messages);

    /*
     * Master config will indicate if this controller should run the
     * message pump with high (realtime) priority to reduce the transfer
     * latency on the bus by minimising the delay between a transfer
     * request and the scheduling of the message pump thread. Without this
     * setting the message pump thread will remain at default priority.
     */
    if (master->rt) {
        dev_info(&master->dev,
            "will run message pump with realtime priority\n");
        sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
    }

    return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
    struct spi_message *next;
    unsigned long flags;

    /* get a pointer to the next message, if any */
    spin_lock_irqsave(&master->queue_lock, flags);
    next = list_first_entry_or_null(&master->queue, struct spi_message,
                                    queue);
    spin_unlock_irqrestore(&master->queue_lock, flags);

    return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
    struct spi_message *mesg;
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&master->queue_lock, flags);
    mesg = master->cur_msg;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    spi_unmap_msg(master, mesg);

    if (master->cur_msg_prepared && master->unprepare_message) {
        ret = master->unprepare_message(master, mesg);
        if (ret) {
            dev_err(&master->dev,
                "failed to unprepare message: %d\n", ret);
        }
    }

    spin_lock_irqsave(&master->queue_lock, flags);
    master->cur_msg = NULL;
    master->cur_msg_prepared = false;
    kthread_queue_work(&master->kworker, &master->pump_messages);
    spin_unlock_irqrestore(&master->queue_lock, flags);

    trace_spi_message_done(mesg);

    mesg->state = NULL;
    if (mesg->complete)
        mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (master->running || master->busy) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -EBUSY;
    }

    master->running = true;
    master->cur_msg = NULL;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    kthread_queue_work(&master->kworker, &master->pump_messages);

    return 0;
}
static int spi_stop_queue(struct spi_master *master)
{
    unsigned long flags;
    unsigned limit = 500;
    int ret = 0;

    spin_lock_irqsave(&master->queue_lock, flags);

    /*
     * This is a bit lame, but is optimized for the common execution path.
     * A wait_queue on the master->busy could be used, but then the common
     * execution path (pump_messages) would be required to call wake_up or
     * friends on every SPI message. Do this instead.
     */
    while ((!list_empty(&master->queue) || master->busy) && limit--) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        usleep_range(10000, 11000);
        spin_lock_irqsave(&master->queue_lock, flags);
    }

    if (!list_empty(&master->queue) || master->busy)
        ret = -EBUSY;
    else
        master->running = false;

    spin_unlock_irqrestore(&master->queue_lock, flags);

    if (ret) {
        dev_warn(&master->dev,
                 "could not stop message queue\n");
        return ret;
    }
    return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
    int ret;

    ret = spi_stop_queue(master);

    /*
     * kthread_flush_worker will block until all work is done.
     * If the reason that stop_queue timed out is that the work will never
     * finish, then it does no good to call flush/stop thread, so
     * return anyway.
     */
    if (ret) {
        dev_err(&master->dev, "problem destroying queue\n");
        return ret;
    }

    kthread_flush_worker(&master->kworker);
    kthread_stop(master->kworker_task);

    return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
    struct spi_master *master = spi->master;
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (!master->running) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -ESHUTDOWN;
    }
    msg->actual_length = 0;
    msg->status = -EINPROGRESS;

    list_add_tail(&msg->queue, &master->queue);
    if (!master->busy && need_pump)
        kthread_queue_work(&master->kworker, &master->pump_messages);

    spin_unlock_irqrestore(&master->queue_lock, flags);
    return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
    return __spi_queued_transfer(spi, msg, true);
}
static int spi_master_initialize_queue(struct spi_master *master)
{
    int ret;

    master->transfer = spi_queued_transfer;
    if (!master->transfer_one_message)
        master->transfer_one_message = spi_transfer_one_message;

    /* Initialize and start queue */
    ret = spi_init_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem initializing queue\n");
        goto err_init_queue;
    }
    master->queued = true;
    ret = spi_start_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem starting queue\n");
        goto err_start_queue;
    }

    return 0;

err_start_queue:
    spi_destroy_queue(master);
err_init_queue:
    return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
                           struct device_node *nc)
{
    u32 value;
    int rc;

    /* Device address */
    rc = of_property_read_u32(nc, "reg", &value);
    if (rc) {
        dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
            nc->full_name, rc);
        return rc;
    }
    spi->chip_select = value;

    /* Mode (clock phase/polarity/etc.) */
    if (of_find_property(nc, "spi-cpha", NULL))
        spi->mode |= SPI_CPHA;
    if (of_find_property(nc, "spi-cpol", NULL))
        spi->mode |= SPI_CPOL;
    if (of_find_property(nc, "spi-cs-high", NULL))
        spi->mode |= SPI_CS_HIGH;
    if (of_find_property(nc, "spi-3wire", NULL))
        spi->mode |= SPI_3WIRE;
    if (of_find_property(nc, "spi-lsb-first", NULL))
        spi->mode |= SPI_LSB_FIRST;

    /* Device DUAL/QUAD mode */
    if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
        switch (value) {
        case 1:
            break;
        case 2:
            spi->mode |= SPI_TX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_TX_QUAD;
            break;
        default:
            dev_warn(&master->dev,
                "spi-tx-bus-width %d not supported\n",
                value);
            break;
        }
    }

    if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
        switch (value) {
        case 1:
            break;
        case 2:
            spi->mode |= SPI_RX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_RX_QUAD;
            break;
        default:
            dev_warn(&master->dev,
                "spi-rx-bus-width %d not supported\n",
                value);
            break;
        }
    }

    /* Device speed */
    rc = of_property_read_u32(nc, "spi-max-frequency", &value);
    if (rc) {
        dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
            nc->full_name, rc);
        return rc;
    }
    spi->max_speed_hz = value;

    return 0;
}
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
    struct spi_device *spi;
    int rc;

    /* Alloc an spi_device */
    spi = spi_alloc_device(master);
    if (!spi) {
        dev_err(&master->dev, "spi_device alloc error for %s\n",
            nc->full_name);
        rc = -ENOMEM;
        goto err_out;
    }

    /* Select device driver */
    rc = of_modalias_node(nc, spi->modalias,
                          sizeof(spi->modalias));
    if (rc < 0) {
        dev_err(&master->dev, "cannot find modalias for %s\n",
            nc->full_name);
        goto err_out;
    }

    rc = of_spi_parse_dt(master, spi, nc);
    if (rc)
        goto err_out;

    /* Store a pointer to the node in the device structure */
    of_node_get(nc);
    spi->dev.of_node = nc;

    /* Register the new device */
    rc = spi_add_device(spi);
    if (rc) {
        dev_err(&master->dev, "spi_device register error %s\n",
            nc->full_name);
        goto err_of_node_put;
    }

    return spi;

err_of_node_put:
    of_node_put(nc);
err_out:
    spi_dev_put(spi);
    return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master: Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
    struct spi_device *spi;
    struct device_node *nc;

    if (!master->dev.of_node)
        return;

    for_each_available_child_of_node(master->dev.of_node, nc) {
        if (of_node_test_and_set_flag(nc, OF_POPULATED))
            continue;
        spi = of_register_spi_device(master, nc);
        if (IS_ERR(spi)) {
            dev_warn(&master->dev, "Failed to create SPI device for %s\n",
                nc->full_name);
            of_node_clear_flag(nc, OF_POPULATED);
        }
    }
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
    struct spi_device *spi = data;
    struct spi_master *master = spi->master;

    if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
        struct acpi_resource_spi_serialbus *sb;

        sb = &ares->data.spi_serial_bus;
        if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
            /*
             * ACPI DeviceSelection numbering is handled by the
             * host controller driver in Windows and can vary
             * from driver to driver. In Linux we always expect
             * 0 .. max - 1 so we need to ask the driver to
             * translate between the two schemes.
             */
            if (master->fw_translate_cs) {
                int cs = master->fw_translate_cs(master,
                                sb->device_selection);
                if (cs < 0)
                    return cs;
                spi->chip_select = cs;
            } else {
                spi->chip_select = sb->device_selection;
            }

            spi->max_speed_hz = sb->connection_speed;

            if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
                spi->mode |= SPI_CPHA;
            if (sb->clock_polarity == ACPI_SPI_START_HIGH)
                spi->mode |= SPI_CPOL;
            if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
                spi->mode |= SPI_CS_HIGH;
        }
    } else if (spi->irq < 0) {
        struct resource r;

        if (acpi_dev_resource_interrupt(ares, 0, &r))
            spi->irq = r.start;
    }

    /* Always tell the ACPI core to skip this resource */
    return 1;
}
static acpi_status acpi_register_spi_device(struct spi_master *master,
                                            struct acpi_device *adev)
{
    struct list_head resource_list;
    struct spi_device *spi;
    int ret;

    if (acpi_bus_get_status(adev) || !adev->status.present ||
        acpi_device_enumerated(adev))
        return AE_OK;

    spi = spi_alloc_device(master);
    if (!spi) {
        dev_err(&master->dev, "failed to allocate SPI device for %s\n",
            dev_name(&adev->dev));
        return AE_NO_MEMORY;
    }

    ACPI_COMPANION_SET(&spi->dev, adev);
    spi->irq = -1;

    INIT_LIST_HEAD(&resource_list);
    ret = acpi_dev_get_resources(adev, &resource_list,
                                 acpi_spi_add_resource, spi);
    acpi_dev_free_resource_list(&resource_list);

    if (ret < 0 || !spi->max_speed_hz) {
        spi_dev_put(spi);
        return AE_OK;
    }

    acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
                      sizeof(spi->modalias));

    if (spi->irq < 0)
        spi->irq = acpi_dev_gpio_irq_get(adev, 0);

    acpi_device_set_enumerated(adev);

    adev->power.flags.ignore_parent = true;
    if (spi_add_device(spi)) {
        adev->power.flags.ignore_parent = false;
        dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
            dev_name(&adev->dev));
        spi_dev_put(spi);
    }

    return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                                       void *data, void **return_value)
{
    struct spi_master *master = data;
    struct acpi_device *adev;

    if (acpi_bus_get_device(handle, &adev))
        return AE_OK;

    return acpi_register_spi_device(master, adev);
}

static void acpi_register_spi_devices(struct spi_master *master)
{
    acpi_status status;
    acpi_handle handle;

    handle = ACPI_HANDLE(master->dev.parent);
    if (!handle)
        return;

    status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                 acpi_spi_add_device, NULL,
                                 master, NULL);
    if (ACPI_FAILURE(status))
        dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
    struct spi_master *master;

    master = container_of(dev, struct spi_master, dev);
    kfree(master);
}

static struct class spi_master_class = {
    .name        = "spi_master",
    .owner       = THIS_MODULE,
    .dev_release = spi_master_release,
    .dev_groups  = spi_master_groups,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
    struct spi_master *master;

    if (!dev)
        return NULL;

    master = kzalloc(size + sizeof(*master), GFP_KERNEL);
    if (!master)
        return NULL;

    device_initialize(&master->dev);
    master->bus_num = -1;
    master->num_chipselect = 1;
    master->dev.class = &spi_master_class;
    master->dev.parent = dev;
    pm_suspend_ignore_children(&master->dev, true);
    spi_master_set_devdata(master, &master[1]);

    return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
    int nb, i, *cs;
    struct device_node *np = master->dev.of_node;

    if (!np)
        return 0;

    nb = of_gpio_named_count(np, "cs-gpios");
    master->num_chipselect = max_t(int, nb, master->num_chipselect);

    /* Return error only for an incorrectly formed cs-gpios property */
    if (nb == 0 || nb == -ENOENT)
        return 0;
    else if (nb < 0)
        return nb;

    cs = devm_kzalloc(&master->dev,
                      sizeof(int) * master->num_chipselect,
                      GFP_KERNEL);
    master->cs_gpios = cs;

    if (!master->cs_gpios)
        return -ENOMEM;

    for (i = 0; i < master->num_chipselect; i++)
        cs[i] = -ENOENT;

    for (i = 0; i < nb; i++)
        cs[i] = of_get_named_gpio(np, "cs-gpios", i);

    return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
    return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
    static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
    struct device *dev = master->dev.parent;
    struct boardinfo *bi;
    int status = -ENODEV;
    int dynamic = 0;

    if (!dev)
        return -ENODEV;

    status = of_spi_register_master(master);
    if (status)
        return status;

    /* even if it's just one always-selected device, there must
     * be at least one chipselect
     */
    if (master->num_chipselect == 0)
        return -EINVAL;

    if ((master->bus_num < 0) && master->dev.of_node)
        master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

    /* convention:  dynamically assigned bus IDs count down from the max */
    if (master->bus_num < 0) {
        /* FIXME switch to an IDR based scheme, something like
         * I2C now uses, so we can't run out of "dynamic" IDs
         */
        master->bus_num = atomic_dec_return(&dyn_bus_id);
        dynamic = 1;
    }

    INIT_LIST_HEAD(&master->queue);
    spin_lock_init(&master->queue_lock);
    spin_lock_init(&master->bus_lock_spinlock);
    mutex_init(&master->bus_lock_mutex);
    mutex_init(&master->io_mutex);
    master->bus_lock_flag = 0;
    init_completion(&master->xfer_completion);
    if (!master->max_dma_len)
        master->max_dma_len = INT_MAX;

    /* register the device, then userspace will see it.
     * registration fails if the bus ID is in use.
     */
    dev_set_name(&master->dev, "spi%u", master->bus_num);
    status = device_add(&master->dev);
    if (status < 0)
        goto done;
    dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
            dynamic ? " (dynamic)" : "");

    /* If we're using a queued driver, start the queue */
    if (master->transfer)
        dev_info(dev, "master is unqueued, this is deprecated\n");
    else {
        status = spi_master_initialize_queue(master);
        if (status) {
            device_del(&master->dev);
            goto done;
        }
    }
    /* add statistics */
    spin_lock_init(&master->statistics.lock);

    mutex_lock(&board_lock);
    list_add_tail(&master->list, &spi_master_list);
    list_for_each_entry(bi, &board_list, list)
        spi_match_master_to_boardinfo(master, &bi->board_info);
    mutex_unlock(&board_lock);

    /* Register devices from the device tree and ACPI */
    of_register_spi_devices(master);
    acpi_register_spi_devices(master);
done:
    return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
static void devm_spi_unregister(struct device *dev, void *res)
{
    spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
    struct spi_master **ptr;
    int ret;

    ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return -ENOMEM;

    ret = spi_register_master(master);
    if (!ret) {
        *ptr = master;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
static int __unregister(struct device *dev, void *null)
{
    spi_unregister_device(to_spi_device(dev));
    return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
    int dummy;

    if (master->queued) {
        if (spi_destroy_queue(master))
            dev_err(&master->dev, "queue remove failed\n");
    }

    mutex_lock(&board_lock);
    list_del(&master->list);
    mutex_unlock(&board_lock);

    dummy = device_for_each_child(&master->dev, NULL, __unregister);
    device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
    int ret;

    /* Basically no-ops for non-queued masters */
    if (!master->queued)
        return 0;

    ret = spi_stop_queue(master);
    if (ret)
        dev_err(&master->dev, "queue stop failed\n");

    return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
    int ret;

    if (!master->queued)
        return 0;

    ret = spi_start_queue(master);
    if (ret)
        dev_err(&master->dev, "queue restart failed\n");

    return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
static int __spi_master_match(struct device *dev, const void *data)
{
    struct spi_master *m;
    const u16 *bus_num = data;

    m = container_of(dev, struct spi_master, dev);
    return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
    struct device *dev;
    struct spi_master *master = NULL;

    dev = class_find_device(&spi_master_class, NULL, &bus_num,
                            __spi_master_match);
    if (dev)
        master = container_of(dev, struct spi_master, dev);
    /* reference got in class_find_device */
    return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_master to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
                    spi_res_release_t release,
                    size_t size, gfp_t gfp)
{
    struct spi_res *sres;

    sres = kzalloc(sizeof(*sres) + size, gfp);
    if (!sres)
        return NULL;

    INIT_LIST_HEAD(&sres->entry);
    sres->release = release;

    return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
    struct spi_res *sres = container_of(res, struct spi_res, data);

    if (!res)
        return;

    WARN_ON(!list_empty(&sres->entry));
    kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
    struct spi_res *sres = container_of(res, struct spi_res, data);

    WARN_ON(!list_empty(&sres->entry));
    list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @master:  the @spi_master
 * @message: the @spi_message
 */
void spi_res_release(struct spi_master *master,
                     struct spi_message *message)
{
    struct spi_res *res;

    while (!list_empty(&message->resources)) {
        res = list_last_entry(&message->resources,
                              struct spi_res, entry);

        if (res->release)
            res->release(master, message, res->data);

        list_del(&res->entry);

        kfree(res);
    }
}
EXPORT_SYMBOL_GPL(spi_res_release);
2245 /*-------------------------------------------------------------------------*/
2247 /* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_master *master,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(master, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after it is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copy of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add it to the list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacements spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @master:  the @spi_master for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum when to apply this
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_master *master,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(
				master, msg, &xfer, maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
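
/*
 * Example (illustrative sketch only): a controller driver whose FIFO cannot
 * take transfers above 256 bytes could call this helper from its
 * prepare_message() callback.  The 256-byte limit and the function name are
 * assumptions made up for the sketch.
 */
static int __maybe_unused example_prepare_message(struct spi_master *master,
						  struct spi_message *msg)
{
	/* split any over-long transfer into 256-byte chunks */
	return spi_split_transfers_maxsize(master, msg, 256, GFP_KERNEL);
}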

/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
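
/*
 * Example (illustrative sketch only): a protocol driver probe() asking for
 * mode 3, 8-bit words and a 1 MHz clock ceiling.  The numbers and the
 * function name are assumptions made up for the sketch; real drivers take
 * these values from their datasheet or firmware description.
 */
static int __maybe_unused example_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;
	spi->bits_per_word = 8;
	spi->max_speed_hz = 1000000;

	return spi_setup(spi);
}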

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
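
/*
 * Example (illustrative sketch only): submitting one receive transfer
 * without blocking.  The completion handler, the function names and the
 * caller-provided storage are assumptions made up for the sketch; message
 * and transfer must stay allocated until the callback has run.
 */
static void __maybe_unused example_async_done(void *context)
{
	complete(context);
}

static int __maybe_unused example_async_read(struct spi_device *spi,
					     void *rxbuf, size_t len,
					     struct spi_message *msg,
					     struct spi_transfer *xfer,
					     struct completion *done)
{
	memset(xfer, 0, sizeof(*xfer));
	xfer->rx_buf = rxbuf;
	xfer->len = len;

	spi_message_init(msg);
	spi_message_add_tail(xfer, msg);
	msg->complete = example_async_done;
	msg->context = done;

	/* returns immediately; example_async_done() signals completion */
	return spi_async(spi, msg);
}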

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_master *master = spi->master;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->master->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->master->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
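
/*
 * Example (illustrative sketch only): a synchronous full-duplex exchange
 * built from one spi_transfer.  The function name is an assumption made up
 * for the sketch; callers should pass DMA-safe memory for tx_buf/rx_buf.
 */
static int __maybe_unused example_sync_xfer(struct spi_device *spi,
					    const void *tx, void *rx,
					    size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* blocks until the controller has finished the message */
	return spi_sync(spi, &msg);
}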

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
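
/*
 * Example (illustrative sketch only): keeping two messages back to back on
 * the bus with no other client in between.  The function name is an
 * assumption made up for the sketch; both messages are assumed to have been
 * built by the caller.
 */
static int __maybe_unused example_atomic_pair(struct spi_device *spi,
					      struct spi_message *first,
					      struct spi_message *second)
{
	int ret;

	spi_bus_lock(spi->master);

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(spi->master);

	return ret;
}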

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
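
/*
 * Example (illustrative sketch only): reading one register of a device that
 * expects a single command byte before the reply.  The function name and
 * the "read" bit convention are assumptions made up for the sketch; the
 * stack buffers are fine here because spi_write_then_read() copies through
 * its own DMA-safe bounce buffer.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi,
					   u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* assumed per-device "read" marker */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}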

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not using spi_bus, so we find it with another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_master_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_master, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);