drivers/spi/spi.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6
7 #include <linux/kernel.h>
8 #include <linux/device.h>
9 #include <linux/init.h>
10 #include <linux/cache.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/mutex.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/clk/clk-conf.h>
17 #include <linux/slab.h>
18 #include <linux/mod_devicetable.h>
19 #include <linux/spi/spi.h>
20 #include <linux/spi/spi-mem.h>
21 #include <linux/of_gpio.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm_domain.h>
25 #include <linux/property.h>
26 #include <linux/export.h>
27 #include <linux/sched/rt.h>
28 #include <uapi/linux/sched/types.h>
29 #include <linux/delay.h>
30 #include <linux/kthread.h>
31 #include <linux/ioport.h>
32 #include <linux/acpi.h>
33 #include <linux/highmem.h>
34 #include <linux/idr.h>
35 #include <linux/platform_data/x86/apple.h>
36
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/spi.h>
39 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
41
42 #include "internals.h"
43
44 static DEFINE_IDR(spi_master_idr);
45
46 static void spidev_release(struct device *dev)
47 {
48 struct spi_device *spi = to_spi_device(dev);
49
50 /* SPI controllers may clean up for released devices */
51 if (spi->controller->cleanup)
52 spi->controller->cleanup(spi);
53
54 spi_controller_put(spi->controller);
55 kfree(spi->driver_override);
56 kfree(spi);
57 }
58
59 static ssize_t
60 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
61 {
62 const struct spi_device *spi = to_spi_device(dev);
63 int len;
64
65 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
66 if (len != -ENODEV)
67 return len;
68
69 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
70 }
71 static DEVICE_ATTR_RO(modalias);
72
73 static ssize_t driver_override_store(struct device *dev,
74 struct device_attribute *a,
75 const char *buf, size_t count)
76 {
77 struct spi_device *spi = to_spi_device(dev);
78 const char *end = memchr(buf, '\n', count);
79 const size_t len = end ? end - buf : count;
80 const char *driver_override, *old;
81
82 /* We need to keep extra room for a newline when displaying value */
83 if (len >= (PAGE_SIZE - 1))
84 return -EINVAL;
85
86 driver_override = kstrndup(buf, len, GFP_KERNEL);
87 if (!driver_override)
88 return -ENOMEM;
89
90 device_lock(dev);
91 old = spi->driver_override;
92 if (len) {
93 spi->driver_override = driver_override;
94 } else {
95 /* Empty string, disable driver override */
96 spi->driver_override = NULL;
97 kfree(driver_override);
98 }
99 device_unlock(dev);
100 kfree(old);
101
102 return count;
103 }
104
105 static ssize_t driver_override_show(struct device *dev,
106 struct device_attribute *a, char *buf)
107 {
108 const struct spi_device *spi = to_spi_device(dev);
109 ssize_t len;
110
111 device_lock(dev);
112 len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
113 device_unlock(dev);
114 return len;
115 }
116 static DEVICE_ATTR_RW(driver_override);
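/*
 * Illustrative sysfs usage for the driver_override attribute above (a
 * sketch, not kernel code; the device name and the spidev driver are
 * assumptions):
 *
 *	# echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	# echo spi0.0 > /sys/bus/spi/drivers_probe
 *
 * Writing an empty string clears the override again, as handled by
 * driver_override_store() above.
 */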
117
118 #define SPI_STATISTICS_ATTRS(field, file) \
119 static ssize_t spi_controller_##field##_show(struct device *dev, \
120 struct device_attribute *attr, \
121 char *buf) \
122 { \
123 struct spi_controller *ctlr = container_of(dev, \
124 struct spi_controller, dev); \
125 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
126 } \
127 static struct device_attribute dev_attr_spi_controller_##field = { \
128 .attr = { .name = file, .mode = 0444 }, \
129 .show = spi_controller_##field##_show, \
130 }; \
131 static ssize_t spi_device_##field##_show(struct device *dev, \
132 struct device_attribute *attr, \
133 char *buf) \
134 { \
135 struct spi_device *spi = to_spi_device(dev); \
136 return spi_statistics_##field##_show(&spi->statistics, buf); \
137 } \
138 static struct device_attribute dev_attr_spi_device_##field = { \
139 .attr = { .name = file, .mode = 0444 }, \
140 .show = spi_device_##field##_show, \
141 }
142
143 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
144 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
145 char *buf) \
146 { \
147 unsigned long flags; \
148 ssize_t len; \
149 spin_lock_irqsave(&stat->lock, flags); \
150 len = sprintf(buf, format_string, stat->field); \
151 spin_unlock_irqrestore(&stat->lock, flags); \
152 return len; \
153 } \
154 SPI_STATISTICS_ATTRS(name, file)
155
156 #define SPI_STATISTICS_SHOW(field, format_string) \
157 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
158 field, format_string)
159
160 SPI_STATISTICS_SHOW(messages, "%lu");
161 SPI_STATISTICS_SHOW(transfers, "%lu");
162 SPI_STATISTICS_SHOW(errors, "%lu");
163 SPI_STATISTICS_SHOW(timedout, "%lu");
164
165 SPI_STATISTICS_SHOW(spi_sync, "%lu");
166 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
167 SPI_STATISTICS_SHOW(spi_async, "%lu");
168
169 SPI_STATISTICS_SHOW(bytes, "%llu");
170 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
171 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
172
173 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
174 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
175 "transfer_bytes_histo_" number, \
176 transfer_bytes_histo[index], "%lu")
177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
190 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
191 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
192 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
193 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
194
195 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
196
197 static struct attribute *spi_dev_attrs[] = {
198 &dev_attr_modalias.attr,
199 &dev_attr_driver_override.attr,
200 NULL,
201 };
202
203 static const struct attribute_group spi_dev_group = {
204 .attrs = spi_dev_attrs,
205 };
206
207 static struct attribute *spi_device_statistics_attrs[] = {
208 &dev_attr_spi_device_messages.attr,
209 &dev_attr_spi_device_transfers.attr,
210 &dev_attr_spi_device_errors.attr,
211 &dev_attr_spi_device_timedout.attr,
212 &dev_attr_spi_device_spi_sync.attr,
213 &dev_attr_spi_device_spi_sync_immediate.attr,
214 &dev_attr_spi_device_spi_async.attr,
215 &dev_attr_spi_device_bytes.attr,
216 &dev_attr_spi_device_bytes_rx.attr,
217 &dev_attr_spi_device_bytes_tx.attr,
218 &dev_attr_spi_device_transfer_bytes_histo0.attr,
219 &dev_attr_spi_device_transfer_bytes_histo1.attr,
220 &dev_attr_spi_device_transfer_bytes_histo2.attr,
221 &dev_attr_spi_device_transfer_bytes_histo3.attr,
222 &dev_attr_spi_device_transfer_bytes_histo4.attr,
223 &dev_attr_spi_device_transfer_bytes_histo5.attr,
224 &dev_attr_spi_device_transfer_bytes_histo6.attr,
225 &dev_attr_spi_device_transfer_bytes_histo7.attr,
226 &dev_attr_spi_device_transfer_bytes_histo8.attr,
227 &dev_attr_spi_device_transfer_bytes_histo9.attr,
228 &dev_attr_spi_device_transfer_bytes_histo10.attr,
229 &dev_attr_spi_device_transfer_bytes_histo11.attr,
230 &dev_attr_spi_device_transfer_bytes_histo12.attr,
231 &dev_attr_spi_device_transfer_bytes_histo13.attr,
232 &dev_attr_spi_device_transfer_bytes_histo14.attr,
233 &dev_attr_spi_device_transfer_bytes_histo15.attr,
234 &dev_attr_spi_device_transfer_bytes_histo16.attr,
235 &dev_attr_spi_device_transfers_split_maxsize.attr,
236 NULL,
237 };
238
239 static const struct attribute_group spi_device_statistics_group = {
240 .name = "statistics",
241 .attrs = spi_device_statistics_attrs,
242 };
243
244 static const struct attribute_group *spi_dev_groups[] = {
245 &spi_dev_group,
246 &spi_device_statistics_group,
247 NULL,
248 };
249
250 static struct attribute *spi_controller_statistics_attrs[] = {
251 &dev_attr_spi_controller_messages.attr,
252 &dev_attr_spi_controller_transfers.attr,
253 &dev_attr_spi_controller_errors.attr,
254 &dev_attr_spi_controller_timedout.attr,
255 &dev_attr_spi_controller_spi_sync.attr,
256 &dev_attr_spi_controller_spi_sync_immediate.attr,
257 &dev_attr_spi_controller_spi_async.attr,
258 &dev_attr_spi_controller_bytes.attr,
259 &dev_attr_spi_controller_bytes_rx.attr,
260 &dev_attr_spi_controller_bytes_tx.attr,
261 &dev_attr_spi_controller_transfer_bytes_histo0.attr,
262 &dev_attr_spi_controller_transfer_bytes_histo1.attr,
263 &dev_attr_spi_controller_transfer_bytes_histo2.attr,
264 &dev_attr_spi_controller_transfer_bytes_histo3.attr,
265 &dev_attr_spi_controller_transfer_bytes_histo4.attr,
266 &dev_attr_spi_controller_transfer_bytes_histo5.attr,
267 &dev_attr_spi_controller_transfer_bytes_histo6.attr,
268 &dev_attr_spi_controller_transfer_bytes_histo7.attr,
269 &dev_attr_spi_controller_transfer_bytes_histo8.attr,
270 &dev_attr_spi_controller_transfer_bytes_histo9.attr,
271 &dev_attr_spi_controller_transfer_bytes_histo10.attr,
272 &dev_attr_spi_controller_transfer_bytes_histo11.attr,
273 &dev_attr_spi_controller_transfer_bytes_histo12.attr,
274 &dev_attr_spi_controller_transfer_bytes_histo13.attr,
275 &dev_attr_spi_controller_transfer_bytes_histo14.attr,
276 &dev_attr_spi_controller_transfer_bytes_histo15.attr,
277 &dev_attr_spi_controller_transfer_bytes_histo16.attr,
278 &dev_attr_spi_controller_transfers_split_maxsize.attr,
279 NULL,
280 };
281
282 static const struct attribute_group spi_controller_statistics_group = {
283 .name = "statistics",
284 .attrs = spi_controller_statistics_attrs,
285 };
286
287 static const struct attribute_group *spi_master_groups[] = {
288 &spi_controller_statistics_group,
289 NULL,
290 };
291
292 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
293 struct spi_transfer *xfer,
294 struct spi_controller *ctlr)
295 {
296 unsigned long flags;
297 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
298
299 if (l2len < 0)
300 l2len = 0;
301
302 spin_lock_irqsave(&stats->lock, flags);
303
304 stats->transfers++;
305 stats->transfer_bytes_histo[l2len]++;
306
307 stats->bytes += xfer->len;
308 if ((xfer->tx_buf) &&
309 (xfer->tx_buf != ctlr->dummy_tx))
310 stats->bytes_tx += xfer->len;
311 if ((xfer->rx_buf) &&
312 (xfer->rx_buf != ctlr->dummy_rx))
313 stats->bytes_rx += xfer->len;
314
315 spin_unlock_irqrestore(&stats->lock, flags);
316 }
317 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
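/*
 * Worked example of the fls()-based bucketing above, assuming
 * SPI_STATISTICS_HISTO_SIZE is 17 (matching the transfer_bytes_histo_*
 * sysfs files defined earlier):
 *	xfer->len == 0 or 1	-> transfer_bytes_histo[0]	("0-1")
 *	xfer->len == 2..3	-> transfer_bytes_histo[1]	("2-3")
 *	xfer->len == 4..7	-> transfer_bytes_histo[2]	("4-7")
 *	...
 *	xfer->len >= 65536	-> transfer_bytes_histo[16]	("65536+")
 */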
318
319 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
320 * and the sysfs version makes coldplug work too.
321 */
322
323 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
324 const struct spi_device *sdev)
325 {
326 while (id->name[0]) {
327 if (!strcmp(sdev->modalias, id->name))
328 return id;
329 id++;
330 }
331 return NULL;
332 }
333
334 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
335 {
336 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
337
338 return spi_match_id(sdrv->id_table, sdev);
339 }
340 EXPORT_SYMBOL_GPL(spi_get_device_id);
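/*
 * Illustrative sketch (not part of the core): an id_table as consumed by
 * spi_match_id() above.  "example_chip" and friends are hypothetical
 * names.
 *
 *	static const struct spi_device_id example_ids[] = {
 *		{ "example_chip",	0 },
 *		{ "example_chip-b",	1 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, example_ids);
 */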
341
342 static int spi_match_device(struct device *dev, struct device_driver *drv)
343 {
344 const struct spi_device *spi = to_spi_device(dev);
345 const struct spi_driver *sdrv = to_spi_driver(drv);
346
347 /* Check override first, and if set, only use the named driver */
348 if (spi->driver_override)
349 return strcmp(spi->driver_override, drv->name) == 0;
350
351 /* Attempt an OF style match */
352 if (of_driver_match_device(dev, drv))
353 return 1;
354
355 /* Then try ACPI */
356 if (acpi_driver_match_device(dev, drv))
357 return 1;
358
359 if (sdrv->id_table)
360 return !!spi_match_id(sdrv->id_table, spi);
361
362 return strcmp(spi->modalias, drv->name) == 0;
363 }
364
365 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
366 {
367 const struct spi_device *spi = to_spi_device(dev);
368 int rc;
369
370 rc = acpi_device_uevent_modalias(dev, env);
371 if (rc != -ENODEV)
372 return rc;
373
374 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
375 }
376
377 struct bus_type spi_bus_type = {
378 .name = "spi",
379 .dev_groups = spi_dev_groups,
380 .match = spi_match_device,
381 .uevent = spi_uevent,
382 };
383 EXPORT_SYMBOL_GPL(spi_bus_type);
384
385
386 static int spi_drv_probe(struct device *dev)
387 {
388 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
389 struct spi_device *spi = to_spi_device(dev);
390 int ret;
391
392 ret = of_clk_set_defaults(dev->of_node, false);
393 if (ret)
394 return ret;
395
396 if (dev->of_node) {
397 spi->irq = of_irq_get(dev->of_node, 0);
398 if (spi->irq == -EPROBE_DEFER)
399 return -EPROBE_DEFER;
400 if (spi->irq < 0)
401 spi->irq = 0;
402 }
403
404 ret = dev_pm_domain_attach(dev, true);
405 if (ret)
406 return ret;
407
408 ret = sdrv->probe(spi);
409 if (ret)
410 dev_pm_domain_detach(dev, true);
411
412 return ret;
413 }
414
415 static int spi_drv_remove(struct device *dev)
416 {
417 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
418 int ret;
419
420 ret = sdrv->remove(to_spi_device(dev));
421 dev_pm_domain_detach(dev, true);
422
423 return ret;
424 }
425
426 static void spi_drv_shutdown(struct device *dev)
427 {
428 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
429
430 sdrv->shutdown(to_spi_device(dev));
431 }
432
433 /**
434 * __spi_register_driver - register a SPI driver
435 * @owner: owner module of the driver to register
436 * @sdrv: the driver to register
437 * Context: can sleep
438 *
439 * Return: zero on success, else a negative error code.
440 */
441 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
442 {
443 sdrv->driver.owner = owner;
444 sdrv->driver.bus = &spi_bus_type;
445 if (sdrv->probe)
446 sdrv->driver.probe = spi_drv_probe;
447 if (sdrv->remove)
448 sdrv->driver.remove = spi_drv_remove;
449 if (sdrv->shutdown)
450 sdrv->driver.shutdown = spi_drv_shutdown;
451 return driver_register(&sdrv->driver);
452 }
453 EXPORT_SYMBOL_GPL(__spi_register_driver);
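/*
 * Illustrative sketch (not part of the core): a minimal protocol driver
 * using the registration path above.  All names are hypothetical; a real
 * driver would normally use the module_spi_driver() helper as shown.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_0;
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver	= {
 *			.name	= "example_chip",
 *		},
 *		.probe	= example_probe,
 *	};
 *	module_spi_driver(example_driver);
 */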
454
455 /*-------------------------------------------------------------------------*/
456
457 /* SPI devices should normally not be created by SPI device drivers; that
458 * would make them board-specific. Similarly with SPI controller drivers.
459 * Device registration normally goes into a file like arch/.../mach.../board-YYY.c
460 * with other readonly (flashable) information about mainboard devices.
461 */
462
463 struct boardinfo {
464 struct list_head list;
465 struct spi_board_info board_info;
466 };
467
468 static LIST_HEAD(board_list);
469 static LIST_HEAD(spi_controller_list);
470
471 /*
472 * Used to protect add/del operations on the board_info list and the
473 * spi_controller list, and their matching process; it is also used to
474 * protect objects of type struct idr.
475 */
476 static DEFINE_MUTEX(board_lock);
477
478 /*
479 * Prevents addition of devices with the same chip select and
480 * addition of devices below an unregistering controller.
481 */
482 static DEFINE_MUTEX(spi_add_lock);
483
484 /**
485 * spi_alloc_device - Allocate a new SPI device
486 * @ctlr: Controller to which device is connected
487 * Context: can sleep
488 *
489 * Allows a driver to allocate and initialize a spi_device without
490 * registering it immediately. This allows a driver to directly
491 * fill the spi_device with device parameters before calling
492 * spi_add_device() on it.
493 *
494 * The caller is responsible for calling spi_add_device() on the returned
495 * spi_device structure to add it to the SPI controller. If the caller
496 * needs to discard the spi_device without adding it, then it should
497 * call spi_dev_put() on it.
498 *
499 * Return: a pointer to the new device, or NULL.
500 */
501 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
502 {
503 struct spi_device *spi;
504
505 if (!spi_controller_get(ctlr))
506 return NULL;
507
508 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
509 if (!spi) {
510 spi_controller_put(ctlr);
511 return NULL;
512 }
513
514 spi->master = spi->controller = ctlr;
515 spi->dev.parent = &ctlr->dev;
516 spi->dev.bus = &spi_bus_type;
517 spi->dev.release = spidev_release;
518 spi->cs_gpio = -ENOENT;
519 spi->mode = ctlr->buswidth_override_bits;
520
521 spin_lock_init(&spi->statistics.lock);
522
523 device_initialize(&spi->dev);
524 return spi;
525 }
526 EXPORT_SYMBOL_GPL(spi_alloc_device);
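/*
 * Illustrative use of the alloc/add/put contract documented above (a
 * sketch with hypothetical values; error handling abbreviated):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "example_chip", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 *
 * On the failure path spi_dev_put() discards the device without adding
 * it, as required by the comment above.
 */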
527
528 static void spi_dev_set_name(struct spi_device *spi)
529 {
530 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
531
532 if (adev) {
533 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
534 return;
535 }
536
537 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
538 spi->chip_select);
539 }
540
541 static int spi_dev_check(struct device *dev, void *data)
542 {
543 struct spi_device *spi = to_spi_device(dev);
544 struct spi_device *new_spi = data;
545
546 if (spi->controller == new_spi->controller &&
547 spi->chip_select == new_spi->chip_select)
548 return -EBUSY;
549 return 0;
550 }
551
552 /**
553 * spi_add_device - Add spi_device allocated with spi_alloc_device
554 * @spi: spi_device to register
555 *
556 * Companion function to spi_alloc_device. Devices allocated with
557 * spi_alloc_device can be added onto the spi bus with this function.
558 *
559 * Return: 0 on success; negative errno on failure
560 */
561 int spi_add_device(struct spi_device *spi)
562 {
563 struct spi_controller *ctlr = spi->controller;
564 struct device *dev = ctlr->dev.parent;
565 int status;
566
567 /* Chipselects are numbered 0..max; validate. */
568 if (spi->chip_select >= ctlr->num_chipselect) {
569 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
570 ctlr->num_chipselect);
571 return -EINVAL;
572 }
573
574 /* Set the bus ID string */
575 spi_dev_set_name(spi);
576
577 /* We need to make sure there's no other device with this
578 * chipselect **BEFORE** we call setup(), else we'll trash
579 * its configuration. Lock against concurrent add() calls.
580 */
581 mutex_lock(&spi_add_lock);
582
583 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
584 if (status) {
585 dev_err(dev, "chipselect %d already in use\n",
586 spi->chip_select);
587 goto done;
588 }
589
590 /* Controller may unregister concurrently */
591 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
592 !device_is_registered(&ctlr->dev)) {
593 status = -ENODEV;
594 goto done;
595 }
596
597 /* Descriptors take precedence */
598 if (ctlr->cs_gpiods)
599 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
600 else if (ctlr->cs_gpios)
601 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
602
603 /* Drivers may modify this initial i/o setup, but will
604 * normally rely on the device being set up. Devices
605 * using SPI_CS_HIGH can't coexist well otherwise...
606 */
607 status = spi_setup(spi);
608 if (status < 0) {
609 dev_err(dev, "can't setup %s, status %d\n",
610 dev_name(&spi->dev), status);
611 goto done;
612 }
613
614 /* Device may be bound to an active driver when this returns */
615 status = device_add(&spi->dev);
616 if (status < 0)
617 dev_err(dev, "can't add %s, status %d\n",
618 dev_name(&spi->dev), status);
619 else
620 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
621
622 done:
623 mutex_unlock(&spi_add_lock);
624 return status;
625 }
626 EXPORT_SYMBOL_GPL(spi_add_device);
627
628 /**
629 * spi_new_device - instantiate one new SPI device
630 * @ctlr: Controller to which device is connected
631 * @chip: Describes the SPI device
632 * Context: can sleep
633 *
634 * On typical mainboards, this is purely internal; and it's not needed
635 * after board init creates the hard-wired devices. Some development
636 * platforms may not be able to use spi_register_board_info though, and
637 * this is exported so that for example a USB or parport based adapter
638 * driver could add devices (which it would learn about out-of-band).
639 *
640 * Return: the new device, or NULL.
641 */
642 struct spi_device *spi_new_device(struct spi_controller *ctlr,
643 struct spi_board_info *chip)
644 {
645 struct spi_device *proxy;
646 int status;
647
648 /* NOTE: caller did any chip->bus_num checks necessary.
649 *
650 * Also, unless we change the return value convention to use
651 * error-or-pointer (not NULL-or-pointer), troubleshootability
652 * suggests syslogged diagnostics are best here (ugh).
653 */
654
655 proxy = spi_alloc_device(ctlr);
656 if (!proxy)
657 return NULL;
658
659 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
660
661 proxy->chip_select = chip->chip_select;
662 proxy->max_speed_hz = chip->max_speed_hz;
663 proxy->mode = chip->mode;
664 proxy->irq = chip->irq;
665 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
666 proxy->dev.platform_data = (void *) chip->platform_data;
667 proxy->controller_data = chip->controller_data;
668 proxy->controller_state = NULL;
669
670 if (chip->properties) {
671 status = device_add_properties(&proxy->dev, chip->properties);
672 if (status) {
673 dev_err(&ctlr->dev,
674 "failed to add properties to '%s': %d\n",
675 chip->modalias, status);
676 goto err_dev_put;
677 }
678 }
679
680 status = spi_add_device(proxy);
681 if (status < 0)
682 goto err_remove_props;
683
684 return proxy;
685
686 err_remove_props:
687 if (chip->properties)
688 device_remove_properties(&proxy->dev);
689 err_dev_put:
690 spi_dev_put(proxy);
691 return NULL;
692 }
693 EXPORT_SYMBOL_GPL(spi_new_device);
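/*
 * Illustrative sketch of the out-of-band use case mentioned above, e.g.
 * an adapter driver declaring one device it learned about.  The board
 * info values are hypothetical.
 *
 *	static struct spi_board_info example_chip_info = {
 *		.modalias	= "example_chip",
 *		.max_speed_hz	= 10 * 1000 * 1000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_3,
 *	};
 *
 *	struct spi_device *dev = spi_new_device(ctlr, &example_chip_info);
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot add example_chip\n");
 */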
694
695 /**
696 * spi_unregister_device - unregister a single SPI device
697 * @spi: spi_device to unregister
698 *
699 * Start making the passed SPI device vanish. Normally this would be handled
700 * by spi_unregister_controller().
701 */
702 void spi_unregister_device(struct spi_device *spi)
703 {
704 if (!spi)
705 return;
706
707 if (spi->dev.of_node) {
708 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
709 of_node_put(spi->dev.of_node);
710 }
711 if (ACPI_COMPANION(&spi->dev))
712 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
713 device_unregister(&spi->dev);
714 }
715 EXPORT_SYMBOL_GPL(spi_unregister_device);
716
717 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
718 struct spi_board_info *bi)
719 {
720 struct spi_device *dev;
721
722 if (ctlr->bus_num != bi->bus_num)
723 return;
724
725 dev = spi_new_device(ctlr, bi);
726 if (!dev)
727 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
728 bi->modalias);
729 }
730
731 /**
732 * spi_register_board_info - register SPI devices for a given board
733 * @info: array of chip descriptors
734 * @n: how many descriptors are provided
735 * Context: can sleep
736 *
737 * Board-specific early init code calls this (probably during arch_initcall)
738 * with segments of the SPI device table. Any device nodes are created later,
739 * after the relevant parent SPI controller (bus_num) is defined. We keep
740 * this table of devices forever, so that reloading a controller driver will
741 * not make Linux forget about these hard-wired devices.
742 *
743 * Other code can also call this, e.g. a particular add-on board might provide
744 * SPI devices through its expansion connector, so code initializing that board
745 * would naturally declare its SPI devices.
746 *
747 * The board info passed can safely be __initdata ... but be careful of
748 * any embedded pointers (platform_data, etc), they're copied as-is.
749 * Device properties are deep-copied though.
750 *
751 * Return: zero on success, else a negative error code.
752 */
753 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
754 {
755 struct boardinfo *bi;
756 int i;
757
758 if (!n)
759 return 0;
760
761 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
762 if (!bi)
763 return -ENOMEM;
764
765 for (i = 0; i < n; i++, bi++, info++) {
766 struct spi_controller *ctlr;
767
768 memcpy(&bi->board_info, info, sizeof(*info));
769 if (info->properties) {
770 bi->board_info.properties =
771 property_entries_dup(info->properties);
772 if (IS_ERR(bi->board_info.properties))
773 return PTR_ERR(bi->board_info.properties);
774 }
775
776 mutex_lock(&board_lock);
777 list_add_tail(&bi->list, &board_list);
778 list_for_each_entry(ctlr, &spi_controller_list, list)
779 spi_match_controller_to_boardinfo(ctlr,
780 &bi->board_info);
781 mutex_unlock(&board_lock);
782 }
783
784 return 0;
785 }
786
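/*
 * Illustrative board init sketch for the call above (hypothetical names;
 * the table may be __initdata because it is copied):
 *
 *	static struct spi_board_info example_board_info[] __initdata = {
 *		{
 *			.modalias	= "example_flash",
 *			.max_speed_hz	= 25 * 1000 * 1000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	static int __init example_board_init(void)
 *	{
 *		return spi_register_board_info(example_board_info,
 *					       ARRAY_SIZE(example_board_info));
 *	}
 *	arch_initcall(example_board_init);
 */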
787 /*-------------------------------------------------------------------------*/
788
789 static void spi_set_cs(struct spi_device *spi, bool enable)
790 {
791 bool enable1 = enable;
792
793 /*
794 * Avoid calling into the driver (or doing delays) if the chip select
795 * isn't actually changing from the last time this was called.
796 */
797 if ((spi->controller->last_cs_enable == enable) &&
798 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
799 return;
800
801 spi->controller->last_cs_enable = enable;
802 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
803
804 if (!spi->controller->set_cs_timing) {
805 if (enable1)
806 spi_delay_exec(&spi->controller->cs_setup, NULL);
807 else
808 spi_delay_exec(&spi->controller->cs_hold, NULL);
809 }
810
811 if (spi->mode & SPI_CS_HIGH)
812 enable = !enable;
813
814 if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
815 if (!(spi->mode & SPI_NO_CS)) {
816 if (spi->cs_gpiod)
817 /* polarity handled by gpiolib */
818 gpiod_set_value_cansleep(spi->cs_gpiod,
819 enable1);
820 else
821 /*
822 * invert the enable line, as active low is
823 * default for SPI.
824 */
825 gpio_set_value_cansleep(spi->cs_gpio, !enable);
826 }
827 /* Some SPI masters need both GPIO CS & slave_select */
828 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
829 spi->controller->set_cs)
830 spi->controller->set_cs(spi, !enable);
831 } else if (spi->controller->set_cs) {
832 spi->controller->set_cs(spi, !enable);
833 }
834
835 if (!spi->controller->set_cs_timing) {
836 if (!enable1)
837 spi_delay_exec(&spi->controller->cs_inactive, NULL);
838 }
839 }
840
841 #ifdef CONFIG_HAS_DMA
842 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
843 struct sg_table *sgt, void *buf, size_t len,
844 enum dma_data_direction dir)
845 {
846 const bool vmalloced_buf = is_vmalloc_addr(buf);
847 unsigned int max_seg_size = dma_get_max_seg_size(dev);
848 #ifdef CONFIG_HIGHMEM
849 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
850 (unsigned long)buf < (PKMAP_BASE +
851 (LAST_PKMAP * PAGE_SIZE)));
852 #else
853 const bool kmap_buf = false;
854 #endif
855 int desc_len;
856 int sgs;
857 struct page *vm_page;
858 struct scatterlist *sg;
859 void *sg_buf;
860 size_t min;
861 int i, ret;
862
863 if (vmalloced_buf || kmap_buf) {
864 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
865 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
866 } else if (virt_addr_valid(buf)) {
867 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
868 sgs = DIV_ROUND_UP(len, desc_len);
869 } else {
870 return -EINVAL;
871 }
872
873 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
874 if (ret != 0)
875 return ret;
876
877 sg = &sgt->sgl[0];
878 for (i = 0; i < sgs; i++) {
879
880 if (vmalloced_buf || kmap_buf) {
881 /*
882 * Next scatterlist entry size is the minimum between
883 * the desc_len and the remaining buffer length that
884 * fits in a page.
885 */
886 min = min_t(size_t, desc_len,
887 min_t(size_t, len,
888 PAGE_SIZE - offset_in_page(buf)));
889 if (vmalloced_buf)
890 vm_page = vmalloc_to_page(buf);
891 else
892 vm_page = kmap_to_page(buf);
893 if (!vm_page) {
894 sg_free_table(sgt);
895 return -ENOMEM;
896 }
897 sg_set_page(sg, vm_page,
898 min, offset_in_page(buf));
899 } else {
900 min = min_t(size_t, len, desc_len);
901 sg_buf = buf;
902 sg_set_buf(sg, sg_buf, min);
903 }
904
905 buf += min;
906 len -= min;
907 sg = sg_next(sg);
908 }
909
910 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
911 if (!ret)
912 ret = -ENOMEM;
913 if (ret < 0) {
914 sg_free_table(sgt);
915 return ret;
916 }
917
918 sgt->nents = ret;
919
920 return 0;
921 }
922
923 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
924 struct sg_table *sgt, enum dma_data_direction dir)
925 {
926 if (sgt->orig_nents) {
927 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
928 sg_free_table(sgt);
929 }
930 }
931
932 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
933 {
934 struct device *tx_dev, *rx_dev;
935 struct spi_transfer *xfer;
936 int ret;
937
938 if (!ctlr->can_dma)
939 return 0;
940
941 if (ctlr->dma_tx)
942 tx_dev = ctlr->dma_tx->device->dev;
943 else
944 tx_dev = ctlr->dev.parent;
945
946 if (ctlr->dma_rx)
947 rx_dev = ctlr->dma_rx->device->dev;
948 else
949 rx_dev = ctlr->dev.parent;
950
951 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
952 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
953 continue;
954
955 if (xfer->tx_buf != NULL) {
956 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
957 (void *)xfer->tx_buf, xfer->len,
958 DMA_TO_DEVICE);
959 if (ret != 0)
960 return ret;
961 }
962
963 if (xfer->rx_buf != NULL) {
964 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
965 xfer->rx_buf, xfer->len,
966 DMA_FROM_DEVICE);
967 if (ret != 0) {
968 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
969 DMA_TO_DEVICE);
970 return ret;
971 }
972 }
973 }
974
975 ctlr->cur_msg_mapped = true;
976
977 return 0;
978 }
979
980 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
981 {
982 struct spi_transfer *xfer;
983 struct device *tx_dev, *rx_dev;
984
985 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
986 return 0;
987
988 if (ctlr->dma_tx)
989 tx_dev = ctlr->dma_tx->device->dev;
990 else
991 tx_dev = ctlr->dev.parent;
992
993 if (ctlr->dma_rx)
994 rx_dev = ctlr->dma_rx->device->dev;
995 else
996 rx_dev = ctlr->dev.parent;
997
998 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
999 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1000 continue;
1001
1002 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1003 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1004 }
1005
1006 ctlr->cur_msg_mapped = false;
1007
1008 return 0;
1009 }
1010 #else /* !CONFIG_HAS_DMA */
1011 static inline int __spi_map_msg(struct spi_controller *ctlr,
1012 struct spi_message *msg)
1013 {
1014 return 0;
1015 }
1016
1017 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1018 struct spi_message *msg)
1019 {
1020 return 0;
1021 }
1022 #endif /* !CONFIG_HAS_DMA */
1023
1024 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1025 struct spi_message *msg)
1026 {
1027 struct spi_transfer *xfer;
1028
1029 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1030 /*
1031 * Restore the original value (NULL) of tx_buf and rx_buf if they
1032 * were replaced with the controller's dummy buffers.
1033 */
1034 if (xfer->tx_buf == ctlr->dummy_tx)
1035 xfer->tx_buf = NULL;
1036 if (xfer->rx_buf == ctlr->dummy_rx)
1037 xfer->rx_buf = NULL;
1038 }
1039
1040 return __spi_unmap_msg(ctlr, msg);
1041 }
1042
1043 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1044 {
1045 struct spi_transfer *xfer;
1046 void *tmp;
1047 unsigned int max_tx, max_rx;
1048
1049 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1050 && !(msg->spi->mode & SPI_3WIRE)) {
1051 max_tx = 0;
1052 max_rx = 0;
1053
1054 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1055 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1056 !xfer->tx_buf)
1057 max_tx = max(xfer->len, max_tx);
1058 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1059 !xfer->rx_buf)
1060 max_rx = max(xfer->len, max_rx);
1061 }
1062
1063 if (max_tx) {
1064 tmp = krealloc(ctlr->dummy_tx, max_tx,
1065 GFP_KERNEL | GFP_DMA);
1066 if (!tmp)
1067 return -ENOMEM;
1068 ctlr->dummy_tx = tmp;
1069 memset(tmp, 0, max_tx);
1070 }
1071
1072 if (max_rx) {
1073 tmp = krealloc(ctlr->dummy_rx, max_rx,
1074 GFP_KERNEL | GFP_DMA);
1075 if (!tmp)
1076 return -ENOMEM;
1077 ctlr->dummy_rx = tmp;
1078 }
1079
1080 if (max_tx || max_rx) {
1081 list_for_each_entry(xfer, &msg->transfers,
1082 transfer_list) {
1083 if (!xfer->len)
1084 continue;
1085 if (!xfer->tx_buf)
1086 xfer->tx_buf = ctlr->dummy_tx;
1087 if (!xfer->rx_buf)
1088 xfer->rx_buf = ctlr->dummy_rx;
1089 }
1090 }
1091 }
1092
1093 return __spi_map_msg(ctlr, msg);
1094 }
1095
1096 static int spi_transfer_wait(struct spi_controller *ctlr,
1097 struct spi_message *msg,
1098 struct spi_transfer *xfer)
1099 {
1100 struct spi_statistics *statm = &ctlr->statistics;
1101 struct spi_statistics *stats = &msg->spi->statistics;
1102 unsigned long long ms;
1103
1104 if (spi_controller_is_slave(ctlr)) {
1105 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1106 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1107 return -EINTR;
1108 }
1109 } else {
1110 ms = 8LL * 1000LL * xfer->len;
1111 do_div(ms, xfer->speed_hz);
1112 ms += ms + 200; /* Some tolerance: double the estimate, plus 200 ms */
1113
1114 if (ms > UINT_MAX)
1115 ms = UINT_MAX;
1116
1117 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1118 msecs_to_jiffies(ms));
1119
1120 if (ms == 0) {
1121 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1122 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1123 dev_err(&msg->spi->dev,
1124 "SPI transfer timed out\n");
1125 return -ETIMEDOUT;
1126 }
1127 }
1128
1129 return 0;
1130 }
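/*
 * Worked example of the timeout estimate above: a 1024 byte transfer at
 * 1 MHz gives 8 * 1000 * 1024 / 1000000 = 8 ms on the wire, which the
 * tolerance line turns into an 8 + 8 + 200 = 216 ms completion timeout.
 */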
1131
1132 static void _spi_transfer_delay_ns(u32 ns)
1133 {
1134 if (!ns)
1135 return;
1136 if (ns <= 1000) {
1137 ndelay(ns);
1138 } else {
1139 u32 us = DIV_ROUND_UP(ns, 1000);
1140
1141 if (us <= 10)
1142 udelay(us);
1143 else
1144 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1145 }
1146 }
1147
1148 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1149 {
1150 u32 delay = _delay->value;
1151 u32 unit = _delay->unit;
1152 u32 hz;
1153
1154 if (!delay)
1155 return 0;
1156
1157 switch (unit) {
1158 case SPI_DELAY_UNIT_USECS:
1159 delay *= 1000;
1160 break;
1161 case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
1162 break;
1163 case SPI_DELAY_UNIT_SCK:
1164 /* clock cycles need to be obtained from spi_transfer */
1165 if (!xfer)
1166 return -EINVAL;
1167 /* if the effective speed is not known, then approximate
1168 * by underestimating with half the requested Hz
1169 */
1170 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1171 if (!hz)
1172 return -EINVAL;
1173 delay *= DIV_ROUND_UP(1000000000, hz);
1174 break;
1175 default:
1176 return -EINVAL;
1177 }
1178
1179 return delay;
1180 }
1181 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
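/*
 * Worked example for the SPI_DELAY_UNIT_SCK branch above: with
 * _delay->value = 4 and an effective speed of 1 MHz, one clock cycle is
 * DIV_ROUND_UP(1000000000, 1000000) = 1000 ns, so the function returns
 * 4000 (ns).  With no effective speed and a requested speed_hz of 2 MHz,
 * the underestimate uses 1 MHz and returns the same 4000 ns.
 */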
1182
1183 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1184 {
1185 int delay;
1186
1187 might_sleep();
1188
1189 if (!_delay)
1190 return -EINVAL;
1191
1192 delay = spi_delay_to_ns(_delay, xfer);
1193 if (delay < 0)
1194 return delay;
1195
1196 _spi_transfer_delay_ns(delay);
1197
1198 return 0;
1199 }
1200 EXPORT_SYMBOL_GPL(spi_delay_exec);
1201
1202 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1203 struct spi_transfer *xfer)
1204 {
1205 u32 delay = xfer->cs_change_delay.value;
1206 u32 unit = xfer->cs_change_delay.unit;
1207 int ret;
1208
1209 /* Return early on zero delay ("fast" mode) - for every unit but USECS */
1210 if (!delay) {
1211 if (unit == SPI_DELAY_UNIT_USECS)
1212 _spi_transfer_delay_ns(10000);
1213 return;
1214 }
1215
1216 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1217 if (ret) {
1218 dev_err_once(&msg->spi->dev,
1219 "Use of unsupported delay unit %i, using default of 10us\n",
1220 unit);
1221 _spi_transfer_delay_ns(10000);
1222 }
1223 }
1224
1225 /*
1226 * spi_transfer_one_message - Default implementation of transfer_one_message()
1227 *
1228 * This is a standard implementation of transfer_one_message() for
1229 * drivers which implement a transfer_one() operation. It provides
1230 * standard handling of delays and chip select management.
1231 */
1232 static int spi_transfer_one_message(struct spi_controller *ctlr,
1233 struct spi_message *msg)
1234 {
1235 struct spi_transfer *xfer;
1236 bool keep_cs = false;
1237 int ret = 0;
1238 struct spi_statistics *statm = &ctlr->statistics;
1239 struct spi_statistics *stats = &msg->spi->statistics;
1240
1241 spi_set_cs(msg->spi, true);
1242
1243 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1244 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1245
1246 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1247 trace_spi_transfer_start(msg, xfer);
1248
1249 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1250 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1251
1252 if (!ctlr->ptp_sts_supported) {
1253 xfer->ptp_sts_word_pre = 0;
1254 ptp_read_system_prets(xfer->ptp_sts);
1255 }
1256
1257 if (xfer->tx_buf || xfer->rx_buf) {
1258 reinit_completion(&ctlr->xfer_completion);
1259
1260 fallback_pio:
1261 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1262 if (ret < 0) {
1263 if (ctlr->cur_msg_mapped &&
1264 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1265 __spi_unmap_msg(ctlr, msg);
1266 ctlr->fallback = true;
1267 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1268 goto fallback_pio;
1269 }
1270
1271 SPI_STATISTICS_INCREMENT_FIELD(statm,
1272 errors);
1273 SPI_STATISTICS_INCREMENT_FIELD(stats,
1274 errors);
1275 dev_err(&msg->spi->dev,
1276 "SPI transfer failed: %d\n", ret);
1277 goto out;
1278 }
1279
1280 if (ret > 0) {
1281 ret = spi_transfer_wait(ctlr, msg, xfer);
1282 if (ret < 0)
1283 msg->status = ret;
1284 }
1285 } else {
1286 if (xfer->len)
1287 dev_err(&msg->spi->dev,
1288 "Bufferless transfer has length %u\n",
1289 xfer->len);
1290 }
1291
1292 if (!ctlr->ptp_sts_supported) {
1293 ptp_read_system_postts(xfer->ptp_sts);
1294 xfer->ptp_sts_word_post = xfer->len;
1295 }
1296
1297 trace_spi_transfer_stop(msg, xfer);
1298
1299 if (msg->status != -EINPROGRESS)
1300 goto out;
1301
1302 spi_transfer_delay_exec(xfer);
1303
1304 if (xfer->cs_change) {
1305 if (list_is_last(&xfer->transfer_list,
1306 &msg->transfers)) {
1307 keep_cs = true;
1308 } else {
1309 spi_set_cs(msg->spi, false);
1310 _spi_transfer_cs_change_delay(msg, xfer);
1311 spi_set_cs(msg->spi, true);
1312 }
1313 }
1314
1315 msg->actual_length += xfer->len;
1316 }
1317
1318 out:
1319 if (ret != 0 || !keep_cs)
1320 spi_set_cs(msg->spi, false);
1321
1322 if (msg->status == -EINPROGRESS)
1323 msg->status = ret;
1324
1325 if (msg->status && ctlr->handle_err)
1326 ctlr->handle_err(ctlr, msg);
1327
1328 spi_finalize_current_message(ctlr);
1329
1330 return ret;
1331 }
1332
1333 /**
1334 * spi_finalize_current_transfer - report completion of a transfer
1335 * @ctlr: the controller reporting completion
1336 *
1337 * Called by SPI drivers using the core transfer_one_message()
1338 * implementation to notify it that the current interrupt driven
1339 * transfer has finished and the next one may be scheduled.
1340 */
1341 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1342 {
1343 complete(&ctlr->xfer_completion);
1344 }
1345 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
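/*
 * Illustrative driver-side sketch (hypothetical names throughout): a
 * transfer_one() implementation that starts the hardware and returns 1,
 * so the core waits in spi_transfer_wait(), paired with an IRQ handler
 * that reports completion through spi_finalize_current_transfer().
 *
 *	static int example_transfer_one(struct spi_controller *ctlr,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		example_hw_start(ctlr, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		example_hw_ack(ctlr);
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */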
1346
1347 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1348 {
1349 if (ctlr->auto_runtime_pm) {
1350 pm_runtime_mark_last_busy(ctlr->dev.parent);
1351 pm_runtime_put_autosuspend(ctlr->dev.parent);
1352 }
1353 }
1354
1355 /**
1356 * __spi_pump_messages - function which processes spi message queue
1357 * @ctlr: controller to process queue for
1358 * @in_kthread: true if we are in the context of the message pump thread
1359 *
1360 * This function checks if there is any spi message in the queue that
1361 * needs processing and if so call out to the driver to initialize hardware
1362 * and transfer each message.
1363 *
1364 * Note that it is called both from the kthread itself and also from
1365 * inside spi_sync(); the queue extraction handling at the top of the
1366 * function should deal with this safely.
1367 */
1368 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1369 {
1370 struct spi_transfer *xfer;
1371 struct spi_message *msg;
1372 bool was_busy = false;
1373 unsigned long flags;
1374 int ret;
1375
1376 /* Lock queue */
1377 spin_lock_irqsave(&ctlr->queue_lock, flags);
1378
1379 /* Make sure we are not already running a message */
1380 if (ctlr->cur_msg) {
1381 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1382 return;
1383 }
1384
1385 /* If another context is idling the device then defer */
1386 if (ctlr->idling) {
1387 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1388 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1389 return;
1390 }
1391
1392 /* Check if the queue is idle */
1393 if (list_empty(&ctlr->queue) || !ctlr->running) {
1394 if (!ctlr->busy) {
1395 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1396 return;
1397 }
1398
1399 /* Defer any non-atomic teardown to the thread */
1400 if (!in_kthread) {
1401 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1402 !ctlr->unprepare_transfer_hardware) {
1403 spi_idle_runtime_pm(ctlr);
1404 ctlr->busy = false;
1405 trace_spi_controller_idle(ctlr);
1406 } else {
1407 kthread_queue_work(ctlr->kworker,
1408 &ctlr->pump_messages);
1409 }
1410 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1411 return;
1412 }
1413
1414 ctlr->busy = false;
1415 ctlr->idling = true;
1416 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1417
1418 kfree(ctlr->dummy_rx);
1419 ctlr->dummy_rx = NULL;
1420 kfree(ctlr->dummy_tx);
1421 ctlr->dummy_tx = NULL;
1422 if (ctlr->unprepare_transfer_hardware &&
1423 ctlr->unprepare_transfer_hardware(ctlr))
1424 dev_err(&ctlr->dev,
1425 "failed to unprepare transfer hardware\n");
1426 spi_idle_runtime_pm(ctlr);
1427 trace_spi_controller_idle(ctlr);
1428
1429 spin_lock_irqsave(&ctlr->queue_lock, flags);
1430 ctlr->idling = false;
1431 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1432 return;
1433 }
1434
1435 /* Extract head of queue */
1436 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1437 ctlr->cur_msg = msg;
1438
1439 list_del_init(&msg->queue);
1440 if (ctlr->busy)
1441 was_busy = true;
1442 else
1443 ctlr->busy = true;
1444 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1445
1446 mutex_lock(&ctlr->io_mutex);
1447
1448 if (!was_busy && ctlr->auto_runtime_pm) {
1449 ret = pm_runtime_get_sync(ctlr->dev.parent);
1450 if (ret < 0) {
1451 pm_runtime_put_noidle(ctlr->dev.parent);
1452 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1453 ret);
1454 mutex_unlock(&ctlr->io_mutex);
1455 return;
1456 }
1457 }
1458
1459 if (!was_busy)
1460 trace_spi_controller_busy(ctlr);
1461
1462 if (!was_busy && ctlr->prepare_transfer_hardware) {
1463 ret = ctlr->prepare_transfer_hardware(ctlr);
1464 if (ret) {
1465 dev_err(&ctlr->dev,
1466 "failed to prepare transfer hardware: %d\n",
1467 ret);
1468
1469 if (ctlr->auto_runtime_pm)
1470 pm_runtime_put(ctlr->dev.parent);
1471
1472 msg->status = ret;
1473 spi_finalize_current_message(ctlr);
1474
1475 mutex_unlock(&ctlr->io_mutex);
1476 return;
1477 }
1478 }
1479
1480 trace_spi_message_start(msg);
1481
1482 if (ctlr->prepare_message) {
1483 ret = ctlr->prepare_message(ctlr, msg);
1484 if (ret) {
1485 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1486 ret);
1487 msg->status = ret;
1488 spi_finalize_current_message(ctlr);
1489 goto out;
1490 }
1491 ctlr->cur_msg_prepared = true;
1492 }
1493
1494 ret = spi_map_msg(ctlr, msg);
1495 if (ret) {
1496 msg->status = ret;
1497 spi_finalize_current_message(ctlr);
1498 goto out;
1499 }
1500
1501 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1502 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1503 xfer->ptp_sts_word_pre = 0;
1504 ptp_read_system_prets(xfer->ptp_sts);
1505 }
1506 }
1507
1508 ret = ctlr->transfer_one_message(ctlr, msg);
1509 if (ret) {
1510 dev_err(&ctlr->dev,
1511 "failed to transfer one message from queue\n");
1512 goto out;
1513 }
1514
1515 out:
1516 mutex_unlock(&ctlr->io_mutex);
1517
1518 /* Prod the scheduler in case transfer_one() was busy waiting */
1519 if (!ret)
1520 cond_resched();
1521 }
1522
1523 /**
1524 * spi_pump_messages - kthread work function which processes spi message queue
1525 * @work: pointer to kthread work struct contained in the controller struct
1526 */
1527 static void spi_pump_messages(struct kthread_work *work)
1528 {
1529 struct spi_controller *ctlr =
1530 container_of(work, struct spi_controller, pump_messages);
1531
1532 __spi_pump_messages(ctlr, true);
1533 }
1534
1535 /**
1536 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
1537 * TX timestamp for the requested byte from the SPI
1538 * transfer. The frequency with which this function
1539 * must be called (once per word, once for the whole
1540 * transfer, once per batch of words etc) is arbitrary
1541 * as long as the @tx buffer offset is greater than or
1542 * equal to the requested byte at the time of the
1543 * call. The timestamp is only taken once, at the
1544 * first such call. It is assumed that the driver
1545 * advances its @tx buffer pointer monotonically.
1546 * @ctlr: Pointer to the spi_controller structure of the driver
1547 * @xfer: Pointer to the transfer being timestamped
1548 * @progress: How many words (not bytes) have been transferred so far
1549 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1550 * transfer, for less jitter in time measurement. Only compatible
1551 * with PIO drivers. If true, must follow up with
1552 * spi_take_timestamp_post or otherwise the system will crash.
1553 * WARNING: for fully predictable results, the CPU frequency must
1554 * also be under control (governor).
1555 */
1556 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1557 struct spi_transfer *xfer,
1558 size_t progress, bool irqs_off)
1559 {
1560 if (!xfer->ptp_sts)
1561 return;
1562
1563 if (xfer->timestamped)
1564 return;
1565
1566 if (progress > xfer->ptp_sts_word_pre)
1567 return;
1568
1569 /* Capture the resolution of the timestamp */
1570 xfer->ptp_sts_word_pre = progress;
1571
1572 if (irqs_off) {
1573 local_irq_save(ctlr->irq_flags);
1574 preempt_disable();
1575 }
1576
1577 ptp_read_system_prets(xfer->ptp_sts);
1578 }
1579 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1580
1581 /**
1582 * spi_take_timestamp_post - helper for drivers to collect the end of the
1583 * TX timestamp for the requested byte from the SPI
1584 * transfer. Can be called with an arbitrary
1585 * frequency: only the first call where @tx exceeds
1586 * or is equal to the requested word will be
1587 * timestamped.
1588 * @ctlr: Pointer to the spi_controller structure of the driver
1589 * @xfer: Pointer to the transfer being timestamped
1590 * @progress: How many words (not bytes) have been transferred so far
1591 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1592 */
1593 void spi_take_timestamp_post(struct spi_controller *ctlr,
1594 struct spi_transfer *xfer,
1595 size_t progress, bool irqs_off)
1596 {
1597 if (!xfer->ptp_sts)
1598 return;
1599
1600 if (xfer->timestamped)
1601 return;
1602
1603 if (progress < xfer->ptp_sts_word_post)
1604 return;
1605
1606 ptp_read_system_postts(xfer->ptp_sts);
1607
1608 if (irqs_off) {
1609 local_irq_restore(ctlr->irq_flags);
1610 preempt_enable();
1611 }
1612
1613 /* Capture the resolution of the timestamp */
1614 xfer->ptp_sts_word_post = progress;
1615
1616 xfer->timestamped = true;
1617 }
1618 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
1619
1620 /**
1621 * spi_set_thread_rt - set the controller to pump at realtime priority
1622 * @ctlr: controller to boost priority of
1623 *
1624 * This can be called because the controller requested realtime priority
1625 * (by setting the ->rt value before calling spi_register_controller()) or
1626 * because a device on the bus said that its transfers needed realtime
1627 * priority.
1628 *
1629 * NOTE: at the moment if any device on a bus says it needs realtime then
1630 * the thread will be at realtime priority for all transfers on that
1631 * controller. If this eventually becomes a problem we may see if we can
1632 * find a way to boost the priority only temporarily during relevant
1633 * transfers.
1634 */
1635 static void spi_set_thread_rt(struct spi_controller *ctlr)
1636 {
1637 dev_info(&ctlr->dev,
1638 "will run message pump with realtime priority\n");
1639 sched_set_fifo(ctlr->kworker->task);
1640 }
1641
1642 static int spi_init_queue(struct spi_controller *ctlr)
1643 {
1644 ctlr->running = false;
1645 ctlr->busy = false;
1646
1647 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1648 if (IS_ERR(ctlr->kworker)) {
1649 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1650 return PTR_ERR(ctlr->kworker);
1651 }
1652
1653 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1654
1655 /*
1656 * Controller config will indicate if this controller should run the
1657 * message pump with high (realtime) priority to reduce the transfer
1658 * latency on the bus by minimising the delay between a transfer
1659 * request and the scheduling of the message pump thread. Without this
1660 * setting the message pump thread will remain at default priority.
1661 */
1662 if (ctlr->rt)
1663 spi_set_thread_rt(ctlr);
1664
1665 return 0;
1666 }
1667
1668 /**
1669 * spi_get_next_queued_message() - called by driver to check for queued
1670 * messages
1671 * @ctlr: the controller to check for queued messages
1672 *
1673 * If there are more messages in the queue, the next message is returned from
1674 * this call.
1675 *
1676 * Return: the next message in the queue, else NULL if the queue is empty.
1677 */
1678 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1679 {
1680 struct spi_message *next;
1681 unsigned long flags;
1682
1683 /* get a pointer to the next message, if any */
1684 spin_lock_irqsave(&ctlr->queue_lock, flags);
1685 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1686 queue);
1687 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1688
1689 return next;
1690 }
1691 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1692
1693 /**
1694 * spi_finalize_current_message() - the current message is complete
1695 * @ctlr: the controller to return the message to
1696 *
1697 * Called by the driver to notify the core that the message in the front of the
1698 * queue is complete and can be removed from the queue.
1699 */
1700 void spi_finalize_current_message(struct spi_controller *ctlr)
1701 {
1702 struct spi_transfer *xfer;
1703 struct spi_message *mesg;
1704 unsigned long flags;
1705 int ret;
1706
1707 spin_lock_irqsave(&ctlr->queue_lock, flags);
1708 mesg = ctlr->cur_msg;
1709 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1710
1711 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1712 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1713 ptp_read_system_postts(xfer->ptp_sts);
1714 xfer->ptp_sts_word_post = xfer->len;
1715 }
1716 }
1717
1718 if (unlikely(ctlr->ptp_sts_supported))
1719 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1720 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1721
1722 spi_unmap_msg(ctlr, mesg);
1723
1724 /* In the prepare_message callback the SPI bus has the opportunity to
1725 * split a transfer into smaller chunks.
1726 * Release the split transfers here since spi_map_msg is done on the
1727 * split transfers.
1728 */
1729 spi_res_release(ctlr, mesg);
1730
1731 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1732 ret = ctlr->unprepare_message(ctlr, mesg);
1733 if (ret) {
1734 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1735 ret);
1736 }
1737 }
1738
1739 spin_lock_irqsave(&ctlr->queue_lock, flags);
1740 ctlr->cur_msg = NULL;
1741 ctlr->cur_msg_prepared = false;
1742 ctlr->fallback = false;
1743 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1744 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1745
1746 trace_spi_message_done(mesg);
1747
1748 mesg->state = NULL;
1749 if (mesg->complete)
1750 mesg->complete(mesg->context);
1751 }
1752 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1753
1754 static int spi_start_queue(struct spi_controller *ctlr)
1755 {
1756 unsigned long flags;
1757
1758 spin_lock_irqsave(&ctlr->queue_lock, flags);
1759
1760 if (ctlr->running || ctlr->busy) {
1761 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1762 return -EBUSY;
1763 }
1764
1765 ctlr->running = true;
1766 ctlr->cur_msg = NULL;
1767 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1768
1769 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1770
1771 return 0;
1772 }
1773
1774 static int spi_stop_queue(struct spi_controller *ctlr)
1775 {
1776 unsigned long flags;
1777 unsigned limit = 500;
1778 int ret = 0;
1779
1780 spin_lock_irqsave(&ctlr->queue_lock, flags);
1781
1782 /*
1783 * This is a bit lame, but is optimized for the common execution path.
1784 * A wait_queue on the ctlr->busy could be used, but then the common
1785 * execution path (pump_messages) would be required to call wake_up or
1786 * friends on every SPI message. Do this instead.
1787 */
1788 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1789 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1790 usleep_range(10000, 11000);
1791 spin_lock_irqsave(&ctlr->queue_lock, flags);
1792 }
1793
1794 if (!list_empty(&ctlr->queue) || ctlr->busy)
1795 ret = -EBUSY;
1796 else
1797 ctlr->running = false;
1798
1799 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1800
1801 if (ret) {
1802 dev_warn(&ctlr->dev, "could not stop message queue\n");
1803 return ret;
1804 }
1805 return ret;
1806 }
1807
1808 static int spi_destroy_queue(struct spi_controller *ctlr)
1809 {
1810 int ret;
1811
1812 ret = spi_stop_queue(ctlr);
1813
1814 /*
1815 * kthread_flush_worker will block until all work is done.
1816 * If the reason that stop_queue timed out is that the work will never
1817 * finish, then flushing or stopping the thread does no good, so
1818 * just return anyway.
1819 */
1820 if (ret) {
1821 dev_err(&ctlr->dev, "problem destroying queue\n");
1822 return ret;
1823 }
1824
1825 kthread_destroy_worker(ctlr->kworker);
1826
1827 return 0;
1828 }
1829
1830 static int __spi_queued_transfer(struct spi_device *spi,
1831 struct spi_message *msg,
1832 bool need_pump)
1833 {
1834 struct spi_controller *ctlr = spi->controller;
1835 unsigned long flags;
1836
1837 spin_lock_irqsave(&ctlr->queue_lock, flags);
1838
1839 if (!ctlr->running) {
1840 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1841 return -ESHUTDOWN;
1842 }
1843 msg->actual_length = 0;
1844 msg->status = -EINPROGRESS;
1845
1846 list_add_tail(&msg->queue, &ctlr->queue);
1847 if (!ctlr->busy && need_pump)
1848 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1849
1850 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1851 return 0;
1852 }
1853
1854 /**
1855 * spi_queued_transfer - transfer function for queued transfers
1856 * @spi: spi device which is requesting transfer
1857 * @msg: spi message which is to be handled and queued to the driver queue
1858 *
1859 * Return: zero on success, else a negative error code.
1860 */
1861 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1862 {
1863 return __spi_queued_transfer(spi, msg, true);
1864 }
1865
1866 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1867 {
1868 int ret;
1869
1870 ctlr->transfer = spi_queued_transfer;
1871 if (!ctlr->transfer_one_message)
1872 ctlr->transfer_one_message = spi_transfer_one_message;
1873
1874 /* Initialize and start queue */
1875 ret = spi_init_queue(ctlr);
1876 if (ret) {
1877 dev_err(&ctlr->dev, "problem initializing queue\n");
1878 goto err_init_queue;
1879 }
1880 ctlr->queued = true;
1881 ret = spi_start_queue(ctlr);
1882 if (ret) {
1883 dev_err(&ctlr->dev, "problem starting queue\n");
1884 goto err_start_queue;
1885 }
1886
1887 return 0;
1888
1889 err_start_queue:
1890 spi_destroy_queue(ctlr);
1891 err_init_queue:
1892 return ret;
1893 }
1894
1895 /**
1896 * spi_flush_queue - Send all pending messages in the queue from the caller's
1897 * context
1898 * @ctlr: controller to process queue for
1899 *
1900 * This should be used when one wants to ensure all pending messages have been
1901 * sent before doing something. It is used by the spi-mem code to make sure SPI
1902 * memory operations do not preempt regular SPI transfers that have been queued
1903 * before the spi-mem operation.
1904 */
1905 void spi_flush_queue(struct spi_controller *ctlr)
1906 {
1907 if (ctlr->transfer == spi_queued_transfer)
1908 __spi_pump_messages(ctlr, false);
1909 }
1910
1911 /*-------------------------------------------------------------------------*/
1912
1913 #if defined(CONFIG_OF)
1914 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1915 struct device_node *nc)
1916 {
1917 u32 value;
1918 int rc;
1919
1920 /* Mode (clock phase/polarity/etc.) */
1921 if (of_property_read_bool(nc, "spi-cpha"))
1922 spi->mode |= SPI_CPHA;
1923 if (of_property_read_bool(nc, "spi-cpol"))
1924 spi->mode |= SPI_CPOL;
1925 if (of_property_read_bool(nc, "spi-3wire"))
1926 spi->mode |= SPI_3WIRE;
1927 if (of_property_read_bool(nc, "spi-lsb-first"))
1928 spi->mode |= SPI_LSB_FIRST;
1929 if (of_property_read_bool(nc, "spi-cs-high"))
1930 spi->mode |= SPI_CS_HIGH;
1931
1932 /* Device DUAL/QUAD mode */
1933 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1934 switch (value) {
1935 case 1:
1936 break;
1937 case 2:
1938 spi->mode |= SPI_TX_DUAL;
1939 break;
1940 case 4:
1941 spi->mode |= SPI_TX_QUAD;
1942 break;
1943 case 8:
1944 spi->mode |= SPI_TX_OCTAL;
1945 break;
1946 default:
1947 dev_warn(&ctlr->dev,
1948 "spi-tx-bus-width %d not supported\n",
1949 value);
1950 break;
1951 }
1952 }
1953
1954 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1955 switch (value) {
1956 case 1:
1957 break;
1958 case 2:
1959 spi->mode |= SPI_RX_DUAL;
1960 break;
1961 case 4:
1962 spi->mode |= SPI_RX_QUAD;
1963 break;
1964 case 8:
1965 spi->mode |= SPI_RX_OCTAL;
1966 break;
1967 default:
1968 dev_warn(&ctlr->dev,
1969 "spi-rx-bus-width %d not supported\n",
1970 value);
1971 break;
1972 }
1973 }
1974
1975 if (spi_controller_is_slave(ctlr)) {
1976 if (!of_node_name_eq(nc, "slave")) {
1977 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1978 nc);
1979 return -EINVAL;
1980 }
1981 return 0;
1982 }
1983
1984 /* Device address */
1985 rc = of_property_read_u32(nc, "reg", &value);
1986 if (rc) {
1987 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1988 nc, rc);
1989 return rc;
1990 }
1991 spi->chip_select = value;
1992
1993 /* Device speed */
1994 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
1995 spi->max_speed_hz = value;
1996
1997 return 0;
1998 }
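
/*
 * Illustrative device tree fragment exercising the properties parsed
 * above (the node name, compatible string and values are assumptions):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;			// chip select 0
 *		spi-max-frequency = <20000000>;
 *		spi-cpol;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;		// SPI_TX_QUAD
 *		spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *	};
 */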
1999
2000 static struct spi_device *
2001 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2002 {
2003 struct spi_device *spi;
2004 int rc;
2005
2006 /* Alloc an spi_device */
2007 spi = spi_alloc_device(ctlr);
2008 if (!spi) {
2009 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2010 rc = -ENOMEM;
2011 goto err_out;
2012 }
2013
2014 /* Select device driver */
2015 rc = of_modalias_node(nc, spi->modalias,
2016 sizeof(spi->modalias));
2017 if (rc < 0) {
2018 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2019 goto err_out;
2020 }
2021
2022 rc = of_spi_parse_dt(ctlr, spi, nc);
2023 if (rc)
2024 goto err_out;
2025
2026 /* Store a pointer to the node in the device structure */
2027 of_node_get(nc);
2028 spi->dev.of_node = nc;
2029
2030 /* Register the new device */
2031 rc = spi_add_device(spi);
2032 if (rc) {
2033 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2034 goto err_of_node_put;
2035 }
2036
2037 return spi;
2038
2039 err_of_node_put:
2040 of_node_put(nc);
2041 err_out:
2042 spi_dev_put(spi);
2043 return ERR_PTR(rc);
2044 }
2045
2046 /**
2047 * of_register_spi_devices() - Register child devices onto the SPI bus
2048 * @ctlr: Pointer to spi_controller device
2049 *
2050 * Registers an spi_device for each child node of the controller node which
2051 * represents a valid SPI slave.
2052 */
2053 static void of_register_spi_devices(struct spi_controller *ctlr)
2054 {
2055 struct spi_device *spi;
2056 struct device_node *nc;
2057
2058 if (!ctlr->dev.of_node)
2059 return;
2060
2061 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2062 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2063 continue;
2064 spi = of_register_spi_device(ctlr, nc);
2065 if (IS_ERR(spi)) {
2066 dev_warn(&ctlr->dev,
2067 "Failed to create SPI device for %pOF\n", nc);
2068 of_node_clear_flag(nc, OF_POPULATED);
2069 }
2070 }
2071 }
2072 #else
2073 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2074 #endif
2075
2076 #ifdef CONFIG_ACPI
2077 struct acpi_spi_lookup {
2078 struct spi_controller *ctlr;
2079 u32 max_speed_hz;
2080 u32 mode;
2081 int irq;
2082 u8 bits_per_word;
2083 u8 chip_select;
2084 };
2085
2086 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2087 struct acpi_spi_lookup *lookup)
2088 {
2089 const union acpi_object *obj;
2090
2091 if (!x86_apple_machine)
2092 return;
2093
2094 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2095 && obj->buffer.length >= 4)
2096 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2097
2098 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2099 && obj->buffer.length == 8)
2100 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2101
2102 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2103 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2104 lookup->mode |= SPI_LSB_FIRST;
2105
2106 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2107 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2108 lookup->mode |= SPI_CPOL;
2109
2110 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2111 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2112 lookup->mode |= SPI_CPHA;
2113 }
2114
2115 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2116 {
2117 struct acpi_spi_lookup *lookup = data;
2118 struct spi_controller *ctlr = lookup->ctlr;
2119
2120 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2121 struct acpi_resource_spi_serialbus *sb;
2122 acpi_handle parent_handle;
2123 acpi_status status;
2124
2125 sb = &ares->data.spi_serial_bus;
2126 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2127
2128 status = acpi_get_handle(NULL,
2129 sb->resource_source.string_ptr,
2130 &parent_handle);
2131
2132 if (ACPI_FAILURE(status) ||
2133 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2134 return -ENODEV;
2135
2136 /*
2137 * ACPI DeviceSelection numbering is handled by the
2138 * host controller driver in Windows and can vary
2139 * from driver to driver. In Linux we always expect
2140 * 0 .. max - 1 so we need to ask the driver to
2141 * translate between the two schemes.
2142 */
2143 if (ctlr->fw_translate_cs) {
2144 int cs = ctlr->fw_translate_cs(ctlr,
2145 sb->device_selection);
2146 if (cs < 0)
2147 return cs;
2148 lookup->chip_select = cs;
2149 } else {
2150 lookup->chip_select = sb->device_selection;
2151 }
2152
2153 lookup->max_speed_hz = sb->connection_speed;
2154 lookup->bits_per_word = sb->data_bit_length;
2155
2156 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2157 lookup->mode |= SPI_CPHA;
2158 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2159 lookup->mode |= SPI_CPOL;
2160 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2161 lookup->mode |= SPI_CS_HIGH;
2162 }
2163 } else if (lookup->irq < 0) {
2164 struct resource r;
2165
2166 if (acpi_dev_resource_interrupt(ares, 0, &r))
2167 lookup->irq = r.start;
2168 }
2169
2170 /* Always tell the ACPI core to skip this resource */
2171 return 1;
2172 }
2173
2174 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2175 struct acpi_device *adev)
2176 {
2177 acpi_handle parent_handle = NULL;
2178 struct list_head resource_list;
2179 struct acpi_spi_lookup lookup = {};
2180 struct spi_device *spi;
2181 int ret;
2182
2183 if (acpi_bus_get_status(adev) || !adev->status.present ||
2184 acpi_device_enumerated(adev))
2185 return AE_OK;
2186
2187 lookup.ctlr = ctlr;
2188 lookup.irq = -1;
2189
2190 INIT_LIST_HEAD(&resource_list);
2191 ret = acpi_dev_get_resources(adev, &resource_list,
2192 acpi_spi_add_resource, &lookup);
2193 acpi_dev_free_resource_list(&resource_list);
2194
2195 if (ret < 0)
2196 /* found SPI in _CRS but it points to another controller */
2197 return AE_OK;
2198
2199 if (!lookup.max_speed_hz &&
2200 !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
2201 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2202 /* Apple does not use _CRS but nested devices for SPI slaves */
2203 acpi_spi_parse_apple_properties(adev, &lookup);
2204 }
2205
2206 if (!lookup.max_speed_hz)
2207 return AE_OK;
2208
2209 spi = spi_alloc_device(ctlr);
2210 if (!spi) {
2211 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2212 dev_name(&adev->dev));
2213 return AE_NO_MEMORY;
2214 }
2215
2217 ACPI_COMPANION_SET(&spi->dev, adev);
2218 spi->max_speed_hz = lookup.max_speed_hz;
2219 spi->mode |= lookup.mode;
2220 spi->irq = lookup.irq;
2221 spi->bits_per_word = lookup.bits_per_word;
2222 spi->chip_select = lookup.chip_select;
2223
2224 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2225 sizeof(spi->modalias));
2226
2227 if (spi->irq < 0)
2228 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2229
2230 acpi_device_set_enumerated(adev);
2231
2232 adev->power.flags.ignore_parent = true;
2233 if (spi_add_device(spi)) {
2234 adev->power.flags.ignore_parent = false;
2235 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2236 dev_name(&adev->dev));
2237 spi_dev_put(spi);
2238 }
2239
2240 return AE_OK;
2241 }
2242
2243 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2244 void *data, void **return_value)
2245 {
2246 struct spi_controller *ctlr = data;
2247 struct acpi_device *adev;
2248
2249 if (acpi_bus_get_device(handle, &adev))
2250 return AE_OK;
2251
2252 return acpi_register_spi_device(ctlr, adev);
2253 }
2254
2255 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2256
2257 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2258 {
2259 acpi_status status;
2260 acpi_handle handle;
2261
2262 handle = ACPI_HANDLE(ctlr->dev.parent);
2263 if (!handle)
2264 return;
2265
2266 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2267 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2268 acpi_spi_add_device, NULL, ctlr, NULL);
2269 if (ACPI_FAILURE(status))
2270 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2271 }
2272 #else
2273 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2274 #endif /* CONFIG_ACPI */
2275
2276 static void spi_controller_release(struct device *dev)
2277 {
2278 struct spi_controller *ctlr;
2279
2280 ctlr = container_of(dev, struct spi_controller, dev);
2281 kfree(ctlr);
2282 }
2283
2284 static struct class spi_master_class = {
2285 .name = "spi_master",
2286 .owner = THIS_MODULE,
2287 .dev_release = spi_controller_release,
2288 .dev_groups = spi_master_groups,
2289 };
2290
2291 #ifdef CONFIG_SPI_SLAVE
2292 /**
2293 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2294 * controller
2295 * @spi: device used for the current transfer
2296 */
2297 int spi_slave_abort(struct spi_device *spi)
2298 {
2299 struct spi_controller *ctlr = spi->controller;
2300
2301 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2302 return ctlr->slave_abort(ctlr);
2303
2304 return -ENOTSUPP;
2305 }
2306 EXPORT_SYMBOL_GPL(spi_slave_abort);
2307
2308 static int match_true(struct device *dev, void *data)
2309 {
2310 return 1;
2311 }
2312
2313 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2314 char *buf)
2315 {
2316 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2317 dev);
2318 struct device *child;
2319
2320 child = device_find_child(&ctlr->dev, NULL, match_true);
2321 return sprintf(buf, "%s\n",
2322 child ? to_spi_device(child)->modalias : NULL);
2323 }
2324
2325 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2326 const char *buf, size_t count)
2327 {
2328 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2329 dev);
2330 struct spi_device *spi;
2331 struct device *child;
2332 char name[32];
2333 int rc;
2334
2335 rc = sscanf(buf, "%31s", name);
2336 if (rc != 1 || !name[0])
2337 return -EINVAL;
2338
2339 child = device_find_child(&ctlr->dev, NULL, match_true);
2340 if (child) {
2341 /* Remove registered slave */
2342 device_unregister(child);
2343 put_device(child);
2344 }
2345
2346 if (strcmp(name, "(null)")) {
2347 /* Register new slave */
2348 spi = spi_alloc_device(ctlr);
2349 if (!spi)
2350 return -ENOMEM;
2351
2352 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2353
2354 rc = spi_add_device(spi);
2355 if (rc) {
2356 spi_dev_put(spi);
2357 return rc;
2358 }
2359 }
2360
2361 return count;
2362 }
2363
2364 static DEVICE_ATTR_RW(slave);
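
/*
 * Sysfs sketch: a slave protocol handler is bound and unbound from
 * userspace by writing its modalias to the attribute above; writing
 * "(null)" removes it, matching the strcmp() in slave_store(). The
 * "spi0" instance and "spi-slave-time" handler are assumptions:
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 */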
2365
2366 static struct attribute *spi_slave_attrs[] = {
2367 &dev_attr_slave.attr,
2368 NULL,
2369 };
2370
2371 static const struct attribute_group spi_slave_group = {
2372 .attrs = spi_slave_attrs,
2373 };
2374
2375 static const struct attribute_group *spi_slave_groups[] = {
2376 &spi_controller_statistics_group,
2377 &spi_slave_group,
2378 NULL,
2379 };
2380
2381 static struct class spi_slave_class = {
2382 .name = "spi_slave",
2383 .owner = THIS_MODULE,
2384 .dev_release = spi_controller_release,
2385 .dev_groups = spi_slave_groups,
2386 };
2387 #else
2388 extern struct class spi_slave_class; /* dummy */
2389 #endif
2390
2391 /**
2392 * __spi_alloc_controller - allocate an SPI master or slave controller
2393 * @dev: the controller, possibly using the platform_bus
2394 * @size: how much zeroed driver-private data to allocate; the pointer to this
2395 * memory is in the driver_data field of the returned device, accessible
2396 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2397 * drivers granting DMA access to portions of their private data need to
2398 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2399 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2400 * slave (true) controller
2401 * Context: can sleep
2402 *
2403 * This call is used only by SPI controller drivers, which are the
2404 * only ones directly touching chip registers. It's how they allocate
2405 * an spi_controller structure, prior to calling spi_register_controller().
2406 *
2407 * This must be called from context that can sleep.
2408 *
2409 * The caller is responsible for assigning the bus number and initializing the
2410 * controller's methods before calling spi_register_controller(); and (after
2411 * errors adding the device) calling spi_controller_put() to prevent a memory
2412 * leak.
2413 *
2414 * Return: the SPI controller structure on success, else NULL.
2415 */
2416 struct spi_controller *__spi_alloc_controller(struct device *dev,
2417 unsigned int size, bool slave)
2418 {
2419 struct spi_controller *ctlr;
2420 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2421
2422 if (!dev)
2423 return NULL;
2424
2425 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2426 if (!ctlr)
2427 return NULL;
2428
2429 device_initialize(&ctlr->dev);
2430 ctlr->bus_num = -1;
2431 ctlr->num_chipselect = 1;
2432 ctlr->slave = slave;
2433 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2434 ctlr->dev.class = &spi_slave_class;
2435 else
2436 ctlr->dev.class = &spi_master_class;
2437 ctlr->dev.parent = dev;
2438 pm_suspend_ignore_children(&ctlr->dev, true);
2439 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2440
2441 return ctlr;
2442 }
2443 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
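
/*
 * Allocation sketch (illustrative; "my_priv" and the surrounding probe
 * code are assumptions): driver-private data sits behind the controller
 * structure and is reached via spi_controller_get_devdata():
 *
 *	struct my_priv { void __iomem *base; };
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */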
2444
2445 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2446 {
2447 spi_controller_put(*(struct spi_controller **)ctlr);
2448 }
2449
2450 /**
2451 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2452 * @dev: physical device of SPI controller
2453 * @size: how much zeroed driver-private data to allocate
2454 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2455 * Context: can sleep
2456 *
2457 * Allocate an SPI controller and automatically release a reference on it
2458 * when @dev is unbound from its driver. Drivers are thus relieved from
2459 * having to call spi_controller_put().
2460 *
2461 * The arguments to this function are identical to __spi_alloc_controller().
2462 *
2463 * Return: the SPI controller structure on success, else NULL.
2464 */
2465 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2466 unsigned int size,
2467 bool slave)
2468 {
2469 struct spi_controller **ptr, *ctlr;
2470
2471 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2472 GFP_KERNEL);
2473 if (!ptr)
2474 return NULL;
2475
2476 ctlr = __spi_alloc_controller(dev, size, slave);
2477 if (ctlr) {
2478 *ptr = ctlr;
2479 devres_add(dev, ptr);
2480 } else {
2481 devres_free(ptr);
2482 }
2483
2484 return ctlr;
2485 }
2486 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2487
2488 #ifdef CONFIG_OF
2489 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2490 {
2491 int nb, i, *cs;
2492 struct device_node *np = ctlr->dev.of_node;
2493
2494 if (!np)
2495 return 0;
2496
2497 nb = of_gpio_named_count(np, "cs-gpios");
2498 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2499
2500 /* Return error only for an incorrectly formed cs-gpios property */
2501 if (nb == 0 || nb == -ENOENT)
2502 return 0;
2503 else if (nb < 0)
2504 return nb;
2505
2506 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2507 GFP_KERNEL);
2508 ctlr->cs_gpios = cs;
2509
2510 if (!ctlr->cs_gpios)
2511 return -ENOMEM;
2512
2513 for (i = 0; i < ctlr->num_chipselect; i++)
2514 cs[i] = -ENOENT;
2515
2516 for (i = 0; i < nb; i++)
2517 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2518
2519 return 0;
2520 }
2521 #else
2522 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2523 {
2524 return 0;
2525 }
2526 #endif
2527
2528 /**
2529 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2530 * @ctlr: The SPI master to grab GPIO descriptors for
2531 */
2532 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2533 {
2534 int nb, i;
2535 struct gpio_desc **cs;
2536 struct device *dev = &ctlr->dev;
2537 unsigned long native_cs_mask = 0;
2538 unsigned int num_cs_gpios = 0;
2539
2540 nb = gpiod_count(dev, "cs");
2541 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2542
2543 /* No GPIOs at all is fine, else return the error */
2544 if (nb == 0 || nb == -ENOENT)
2545 return 0;
2546 else if (nb < 0)
2547 return nb;
2548
2549 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2550 GFP_KERNEL);
2551 if (!cs)
2552 return -ENOMEM;
2553 ctlr->cs_gpiods = cs;
2554
2555 for (i = 0; i < nb; i++) {
2556 /*
2557 * Most chipselects are active low; the inverted
2558 * semantics are handled by special quirks in gpiolib,
2559 * so initializing them to GPIOD_OUT_LOW here means
2560 * "unasserted". In most cases this will drive the physical
2561 * line high.
2562 */
2563 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2564 GPIOD_OUT_LOW);
2565 if (IS_ERR(cs[i]))
2566 return PTR_ERR(cs[i]);
2567
2568 if (cs[i]) {
2569 /*
2570 * If we find a CS GPIO, name it after the device and
2571 * chip select line.
2572 */
2573 char *gpioname;
2574
2575 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2576 dev_name(dev), i);
2577 if (!gpioname)
2578 return -ENOMEM;
2579 gpiod_set_consumer_name(cs[i], gpioname);
2580 num_cs_gpios++;
2581 continue;
2582 }
2583
2584 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2585 dev_err(dev, "Invalid native chip select %d\n", i);
2586 return -EINVAL;
2587 }
2588 native_cs_mask |= BIT(i);
2589 }
2590
2591 ctlr->unused_native_cs = ffz(native_cs_mask);
2592 if (num_cs_gpios && ctlr->max_native_cs &&
2593 ctlr->unused_native_cs >= ctlr->max_native_cs) {
2594 dev_err(dev, "No unused native chip select available\n");
2595 return -EINVAL;
2596 }
2597
2598 return 0;
2599 }
2600
2601 static int spi_controller_check_ops(struct spi_controller *ctlr)
2602 {
2603 /*
2604 * The controller may implement only the high-level SPI-memory-like
2605 * operations if it does not support regular SPI transfers, and this is
2606 * a valid use case.
2607 * If ->mem_ops is NULL, we require that at least one of the
2608 * ->transfer_xxx() methods is implemented.
2609 */
2610 if (ctlr->mem_ops) {
2611 if (!ctlr->mem_ops->exec_op)
2612 return -EINVAL;
2613 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2614 !ctlr->transfer_one_message) {
2615 return -EINVAL;
2616 }
2617
2618 return 0;
2619 }
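
/*
 * Sketch of the two valid configurations checked above (the "my_"
 * names are assumptions): a memory-operations-only controller provides
 * ->mem_ops with .exec_op set, any other controller at least one
 * ->transfer_xxx() hook:
 *
 *	static const struct spi_controller_mem_ops my_mem_ops = {
 *		.exec_op = my_exec_op,
 *	};
 *
 *	ctlr->mem_ops = &my_mem_ops;	// or: ctlr->transfer_one = ...;
 */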
2620
2621 /**
2622 * spi_register_controller - register SPI master or slave controller
2623 * @ctlr: initialized master, originally from spi_alloc_master() or
2624 * spi_alloc_slave()
2625 * Context: can sleep
2626 *
2627 * SPI controllers connect to their drivers using some non-SPI bus,
2628 * such as the platform bus. The final stage of probe() in that code
2629 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2630 *
2631 * SPI controllers use board specific (often SOC specific) bus numbers,
2632 * and board-specific addressing for SPI devices combines those numbers
2633 * with chip select numbers. Since SPI does not directly support dynamic
2634 * device identification, boards need configuration tables telling which
2635 * chip is at which address.
2636 *
2637 * This must be called from context that can sleep. It returns zero on
2638 * success, else a negative error code (dropping the controller's refcount).
2639 * After a successful return, the caller is responsible for calling
2640 * spi_unregister_controller().
2641 *
2642 * Return: zero on success, else a negative error code.
2643 */
2644 int spi_register_controller(struct spi_controller *ctlr)
2645 {
2646 struct device *dev = ctlr->dev.parent;
2647 struct boardinfo *bi;
2648 int status;
2649 int id, first_dynamic;
2650
2651 if (!dev)
2652 return -ENODEV;
2653
2654 /*
2655 * Make sure all necessary hooks are implemented before registering
2656 * the SPI controller.
2657 */
2658 status = spi_controller_check_ops(ctlr);
2659 if (status)
2660 return status;
2661
2662 if (ctlr->bus_num >= 0) {
2663 /* devices with a fixed bus num must check-in with the num */
2664 mutex_lock(&board_lock);
2665 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2666 ctlr->bus_num + 1, GFP_KERNEL);
2667 mutex_unlock(&board_lock);
2668 if (WARN(id < 0, "couldn't get idr"))
2669 return id == -ENOSPC ? -EBUSY : id;
2670 ctlr->bus_num = id;
2671 } else if (ctlr->dev.of_node) {
2672 /* allocate dynamic bus number using Linux idr */
2673 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2674 if (id >= 0) {
2675 ctlr->bus_num = id;
2676 mutex_lock(&board_lock);
2677 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2678 ctlr->bus_num + 1, GFP_KERNEL);
2679 mutex_unlock(&board_lock);
2680 if (WARN(id < 0, "couldn't get idr"))
2681 return id == -ENOSPC ? -EBUSY : id;
2682 }
2683 }
2684 if (ctlr->bus_num < 0) {
2685 first_dynamic = of_alias_get_highest_id("spi");
2686 if (first_dynamic < 0)
2687 first_dynamic = 0;
2688 else
2689 first_dynamic++;
2690
2691 mutex_lock(&board_lock);
2692 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2693 0, GFP_KERNEL);
2694 mutex_unlock(&board_lock);
2695 if (WARN(id < 0, "couldn't get idr"))
2696 return id;
2697 ctlr->bus_num = id;
2698 }
2699 INIT_LIST_HEAD(&ctlr->queue);
2700 spin_lock_init(&ctlr->queue_lock);
2701 spin_lock_init(&ctlr->bus_lock_spinlock);
2702 mutex_init(&ctlr->bus_lock_mutex);
2703 mutex_init(&ctlr->io_mutex);
2704 ctlr->bus_lock_flag = 0;
2705 init_completion(&ctlr->xfer_completion);
2706 if (!ctlr->max_dma_len)
2707 ctlr->max_dma_len = INT_MAX;
2708
2709 /* Register the device; then userspace will see it.
2710 * Registration fails if the bus ID is in use.
2711 */
2712 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2713
2714 if (!spi_controller_is_slave(ctlr)) {
2715 if (ctlr->use_gpio_descriptors) {
2716 status = spi_get_gpio_descs(ctlr);
2717 if (status)
2718 goto free_bus_id;
2719 /*
2720 * A controller using GPIO descriptors always
2721 * supports SPI_CS_HIGH if need be.
2722 */
2723 ctlr->mode_bits |= SPI_CS_HIGH;
2724 } else {
2725 /* Legacy code path for GPIOs from DT */
2726 status = of_spi_get_gpio_numbers(ctlr);
2727 if (status)
2728 goto free_bus_id;
2729 }
2730 }
2731
2732 /*
2733 * Even if it's just one always-selected device, there must
2734 * be at least one chipselect.
2735 */
2736 if (!ctlr->num_chipselect) {
2737 status = -EINVAL;
2738 goto free_bus_id;
2739 }
2740
2741 status = device_add(&ctlr->dev);
2742 if (status < 0)
2743 goto free_bus_id;
2744 dev_dbg(dev, "registered %s %s\n",
2745 spi_controller_is_slave(ctlr) ? "slave" : "master",
2746 dev_name(&ctlr->dev));
2747
2748 /*
2749 * If we're using a queued driver, start the queue. Note that we don't
2750 * need the queueing logic if the driver is only supporting high-level
2751 * memory operations.
2752 */
2753 if (ctlr->transfer) {
2754 dev_info(dev, "controller is unqueued, this is deprecated\n");
2755 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2756 status = spi_controller_initialize_queue(ctlr);
2757 if (status) {
2758 device_del(&ctlr->dev);
2759 goto free_bus_id;
2760 }
2761 }
2762 /* add statistics */
2763 spin_lock_init(&ctlr->statistics.lock);
2764
2765 mutex_lock(&board_lock);
2766 list_add_tail(&ctlr->list, &spi_controller_list);
2767 list_for_each_entry(bi, &board_list, list)
2768 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2769 mutex_unlock(&board_lock);
2770
2771 /* Register devices from the device tree and ACPI */
2772 of_register_spi_devices(ctlr);
2773 acpi_register_spi_devices(ctlr);
2774 return status;
2775
2776 free_bus_id:
2777 mutex_lock(&board_lock);
2778 idr_remove(&spi_master_idr, ctlr->bus_num);
2779 mutex_unlock(&board_lock);
2780 return status;
2781 }
2782 EXPORT_SYMBOL_GPL(spi_register_controller);
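
/*
 * Registration sketch (illustrative; the "my_" hooks and values are
 * assumptions): fill in the methods and limits before registering, and,
 * per the __spi_alloc_controller() contract, drop the reference yourself
 * if registration fails:
 *
 *	ctlr->bus_num = -1;		// request a dynamic bus number
 *	ctlr->num_chipselect = 2;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = my_transfer_one;
 *	ctlr->set_cs = my_set_cs;
 *
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 */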
2783
2784 static void devm_spi_unregister(struct device *dev, void *res)
2785 {
2786 spi_unregister_controller(*(struct spi_controller **)res);
2787 }
2788
2789 /**
2790 * devm_spi_register_controller - register managed SPI master or slave
2791 * controller
2792 * @dev: device managing SPI controller
2793 * @ctlr: initialized controller, originally from spi_alloc_master() or
2794 * spi_alloc_slave()
2795 * Context: can sleep
2796 *
2797 * Register an SPI controller as with spi_register_controller(); it will
2798 * automatically be unregistered and freed when @dev is unbound.
2799 *
2800 * Return: zero on success, else a negative error code.
2801 */
2802 int devm_spi_register_controller(struct device *dev,
2803 struct spi_controller *ctlr)
2804 {
2805 struct spi_controller **ptr;
2806 int ret;
2807
2808 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2809 if (!ptr)
2810 return -ENOMEM;
2811
2812 ret = spi_register_controller(ctlr);
2813 if (!ret) {
2814 *ptr = ctlr;
2815 devres_add(dev, ptr);
2816 } else {
2817 devres_free(ptr);
2818 }
2819
2820 return ret;
2821 }
2822 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2823
2824 static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
2825 {
2826 return *(struct spi_controller **)res == ctlr;
2827 }
2828
2829 static int __unregister(struct device *dev, void *null)
2830 {
2831 spi_unregister_device(to_spi_device(dev));
2832 return 0;
2833 }
2834
2835 /**
2836 * spi_unregister_controller - unregister SPI master or slave controller
2837 * @ctlr: the controller being unregistered
2838 * Context: can sleep
2839 *
2840 * This call is used only by SPI controller drivers, which are the
2841 * only ones directly touching chip registers.
2842 *
2843 * This must be called from context that can sleep.
2844 *
2845 * Note that this function also drops a reference to the controller.
2846 */
2847 void spi_unregister_controller(struct spi_controller *ctlr)
2848 {
2849 struct spi_controller *found;
2850 int id = ctlr->bus_num;
2851
2852 /* Prevent addition of new devices, unregister existing ones */
2853 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2854 mutex_lock(&spi_add_lock);
2855
2856 device_for_each_child(&ctlr->dev, NULL, __unregister);
2857
2858 /* First make sure that this controller was ever added */
2859 mutex_lock(&board_lock);
2860 found = idr_find(&spi_master_idr, id);
2861 mutex_unlock(&board_lock);
2862 if (ctlr->queued) {
2863 if (spi_destroy_queue(ctlr))
2864 dev_err(&ctlr->dev, "queue remove failed\n");
2865 }
2866 mutex_lock(&board_lock);
2867 list_del(&ctlr->list);
2868 mutex_unlock(&board_lock);
2869
2870 device_del(&ctlr->dev);
2871
2872 /* Release the last reference on the controller if its driver
2873 * has not yet been converted to devm_spi_alloc_master/slave().
2874 */
2875 if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
2876 devm_spi_match_controller, ctlr))
2877 put_device(&ctlr->dev);
2878
2879 /* free bus id */
2880 mutex_lock(&board_lock);
2881 if (found == ctlr)
2882 idr_remove(&spi_master_idr, id);
2883 mutex_unlock(&board_lock);
2884
2885 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2886 mutex_unlock(&spi_add_lock);
2887 }
2888 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2889
2890 int spi_controller_suspend(struct spi_controller *ctlr)
2891 {
2892 int ret;
2893
2894 /* Basically no-ops for non-queued controllers */
2895 if (!ctlr->queued)
2896 return 0;
2897
2898 ret = spi_stop_queue(ctlr);
2899 if (ret)
2900 dev_err(&ctlr->dev, "queue stop failed\n");
2901
2902 return ret;
2903 }
2904 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2905
2906 int spi_controller_resume(struct spi_controller *ctlr)
2907 {
2908 int ret;
2909
2910 if (!ctlr->queued)
2911 return 0;
2912
2913 ret = spi_start_queue(ctlr);
2914 if (ret)
2915 dev_err(&ctlr->dev, "queue restart failed\n");
2916
2917 return ret;
2918 }
2919 EXPORT_SYMBOL_GPL(spi_controller_resume);
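
/*
 * PM sketch (illustrative; "my_suspend" is an assumption): controller
 * drivers typically call these helpers from their dev_pm_ops:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 */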
2920
2921 static int __spi_controller_match(struct device *dev, const void *data)
2922 {
2923 struct spi_controller *ctlr;
2924 const u16 *bus_num = data;
2925
2926 ctlr = container_of(dev, struct spi_controller, dev);
2927 return ctlr->bus_num == *bus_num;
2928 }
2929
2930 /**
2931 * spi_busnum_to_master - look up master associated with bus_num
2932 * @bus_num: the master's bus number
2933 * Context: can sleep
2934 *
2935 * This call may be used with devices that are registered after
2936 * arch init time. It returns a refcounted pointer to the relevant
2937 * spi_controller (which the caller must release), or NULL if there is
2938 * no such master registered.
2939 *
2940 * Return: the SPI master structure on success, else NULL.
2941 */
2942 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2943 {
2944 struct device *dev;
2945 struct spi_controller *ctlr = NULL;
2946
2947 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2948 __spi_controller_match);
2949 if (dev)
2950 ctlr = container_of(dev, struct spi_controller, dev);
2951 /* reference got in class_find_device */
2952 return ctlr;
2953 }
2954 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
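
/*
 * Lookup sketch: the reference returned must be dropped by the caller
 * once done (bus number 0 is just an example):
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		// ... use the controller ...
 *		spi_controller_put(ctlr);
 *	}
 */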
2955
2956 /*-------------------------------------------------------------------------*/
2957
2958 /* Core methods for SPI resource management */
2959
2960 /**
2961 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2962 * during the processing of a spi_message while using
2963 * spi_transfer_one
2964 * @spi: the spi device for which we allocate memory
2965 * @release: the release code to execute for this resource
2966 * @size: size to alloc and return
2967 * @gfp: GFP allocation flags
2968 *
2969 * Return: the pointer to the allocated data, or NULL on allocation failure
2970 *
2971 * This may get enhanced in the future to allocate from a memory pool
2972 * of the @spi_device or @spi_controller to avoid repeated allocations.
2973 */
2974 void *spi_res_alloc(struct spi_device *spi,
2975 spi_res_release_t release,
2976 size_t size, gfp_t gfp)
2977 {
2978 struct spi_res *sres;
2979
2980 sres = kzalloc(sizeof(*sres) + size, gfp);
2981 if (!sres)
2982 return NULL;
2983
2984 INIT_LIST_HEAD(&sres->entry);
2985 sres->release = release;
2986
2987 return sres->data;
2988 }
2989 EXPORT_SYMBOL_GPL(spi_res_alloc);
2990
2991 /**
2992 * spi_res_free - free an spi resource
2993 * @res: pointer to the custom data of a resource
2994 */
2996 void spi_res_free(void *res)
2997 {
2998 struct spi_res *sres = container_of(res, struct spi_res, data);
2999
3000 if (!res)
3001 return;
3002
3003 WARN_ON(!list_empty(&sres->entry));
3004 kfree(sres);
3005 }
3006 EXPORT_SYMBOL_GPL(spi_res_free);
3007
3008 /**
3009 * spi_res_add - add a spi_res to the spi_message
3010 * @message: the spi message
3011 * @res: the spi_resource
3012 */
3013 void spi_res_add(struct spi_message *message, void *res)
3014 {
3015 struct spi_res *sres = container_of(res, struct spi_res, data);
3016
3017 WARN_ON(!list_empty(&sres->entry));
3018 list_add_tail(&sres->entry, &message->resources);
3019 }
3020 EXPORT_SYMBOL_GPL(spi_res_add);
3021
3022 /**
3023 * spi_res_release - release all spi resources for this message
3024 * @ctlr: the @spi_controller
3025 * @message: the @spi_message
3026 */
3027 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3028 {
3029 struct spi_res *res, *tmp;
3030
3031 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3032 if (res->release)
3033 res->release(ctlr, message, res->data);
3034
3035 list_del(&res->entry);
3036
3037 kfree(res);
3038 }
3039 }
3040 EXPORT_SYMBOL_GPL(spi_res_release);
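
/*
 * Resource sketch (illustrative; the "my_" names are assumptions):
 * data allocated this way lives until spi_res_release() runs for the
 * message, at which point the release callback is invoked:
 *
 *	static void my_res_release(struct spi_controller *ctlr,
 *				   struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource set up
 *	}
 *
 *	void *data = spi_res_alloc(spi, my_res_release, len, GFP_KERNEL);
 *
 *	if (data)
 *		spi_res_add(msg, data);
 */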
3041
3042 /*-------------------------------------------------------------------------*/
3043
3044 /* Core methods for spi_message alterations */
3045
3046 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3047 struct spi_message *msg,
3048 void *res)
3049 {
3050 struct spi_replaced_transfers *rxfer = res;
3051 size_t i;
3052
3053 /* call extra callback if requested */
3054 if (rxfer->release)
3055 rxfer->release(ctlr, msg, res);
3056
3057 /* insert replaced transfers back into the message */
3058 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3059
3060 /* remove the formerly inserted entries */
3061 for (i = 0; i < rxfer->inserted; i++)
3062 list_del(&rxfer->inserted_transfers[i].transfer_list);
3063 }
3064
3065 /**
3066 * spi_replace_transfers - replace transfers with several transfers
3067 * and register change with spi_message.resources
3068 * @msg: the spi_message we work upon
3069 * @xfer_first: the first spi_transfer we want to replace
3070 * @remove: number of transfers to remove
3071 * @insert: the number of transfers we want to insert instead
3072 * @release: extra release code necessary in some circumstances
3073 * @extradatasize: extra data to allocate (with alignment guarantees
3074 * of struct @spi_transfer)
3075 * @gfp: gfp flags
3076 *
3077 * Return: pointer to @spi_replaced_transfers,
3078 * or an ERR_PTR() in case of errors.
3079 */
3080 struct spi_replaced_transfers *spi_replace_transfers(
3081 struct spi_message *msg,
3082 struct spi_transfer *xfer_first,
3083 size_t remove,
3084 size_t insert,
3085 spi_replaced_release_t release,
3086 size_t extradatasize,
3087 gfp_t gfp)
3088 {
3089 struct spi_replaced_transfers *rxfer;
3090 struct spi_transfer *xfer;
3091 size_t i;
3092
3093 /* allocate the structure using spi_res */
3094 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3095 struct_size(rxfer, inserted_transfers, insert)
3096 + extradatasize,
3097 gfp);
3098 if (!rxfer)
3099 return ERR_PTR(-ENOMEM);
3100
3101 /* the release code to invoke before running the generic release */
3102 rxfer->release = release;
3103
3104 /* assign extradata */
3105 if (extradatasize)
3106 rxfer->extradata =
3107 &rxfer->inserted_transfers[insert];
3108
3109 /* init the replaced_transfers list */
3110 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3111
3112 /* assign the list_entry after which we should reinsert
3113 * the @replaced_transfers - it may be spi_message.transfers!
3114 */
3115 rxfer->replaced_after = xfer_first->transfer_list.prev;
3116
3117 /* remove the requested number of transfers */
3118 for (i = 0; i < remove; i++) {
3119 /* if the entry after replaced_after is msg->transfers,
3120 * then we have been requested to remove more transfers
3121 * than are in the list
3122 */
3123 if (rxfer->replaced_after->next == &msg->transfers) {
3124 dev_err(&msg->spi->dev,
3125 "requested to remove more spi_transfers than are available\n");
3126 /* insert replaced transfers back into the message */
3127 list_splice(&rxfer->replaced_transfers,
3128 rxfer->replaced_after);
3129
3130 /* free the spi_replace_transfer structure */
3131 spi_res_free(rxfer);
3132
3133 /* and return with an error */
3134 return ERR_PTR(-EINVAL);
3135 }
3136
3137 /* remove the entry after replaced_after from list of
3138 * transfers and add it to list of replaced_transfers
3139 */
3140 list_move_tail(rxfer->replaced_after->next,
3141 &rxfer->replaced_transfers);
3142 }
3143
3144 /* create copies of the given xfer with identical settings,
3145 * based on the first transfer to get removed
3146 */
3147 for (i = 0; i < insert; i++) {
3148 /* we need to run in reverse order */
3149 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3150
3151 /* copy all spi_transfer data */
3152 memcpy(xfer, xfer_first, sizeof(*xfer));
3153
3154 /* add to list */
3155 list_add(&xfer->transfer_list, rxfer->replaced_after);
3156
3157 /* clear cs_change and delay for all but the last */
3158 if (i) {
3159 xfer->cs_change = false;
3160 xfer->delay_usecs = 0;
3161 xfer->delay.value = 0;
3162 }
3163 }
3164
3165 /* set up inserted */
3166 rxfer->inserted = insert;
3167
3168 /* and register it with spi_res/spi_message */
3169 spi_res_add(msg, rxfer);
3170
3171 return rxfer;
3172 }
3173 EXPORT_SYMBOL_GPL(spi_replace_transfers);
3174
3175 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3176 struct spi_message *msg,
3177 struct spi_transfer **xferp,
3178 size_t maxsize,
3179 gfp_t gfp)
3180 {
3181 struct spi_transfer *xfer = *xferp, *xfers;
3182 struct spi_replaced_transfers *srt;
3183 size_t offset;
3184 size_t count, i;
3185
3186 /* calculate how many we have to replace */
3187 count = DIV_ROUND_UP(xfer->len, maxsize);
3188
3189 /* create replacement */
3190 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3191 if (IS_ERR(srt))
3192 return PTR_ERR(srt);
3193 xfers = srt->inserted_transfers;
3194
3195 /* now handle each of those newly inserted spi_transfers;
3196 * note that the replacement spi_transfers are all preset
3197 * to the same values as *xferp, so tx_buf, rx_buf and len
3198 * are all identical (as well as most others),
3199 * so we just have to fix up len and the pointers.
3200 *
3201 * this also includes support for the deprecated
3202 * spi_message.is_dma_mapped interface
3203 */
3204
3205 /* the first transfer just needs the length modified, so we
3206 * run it outside the loop
3207 */
3208 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3209
3210 /* all the others need rx_buf/tx_buf also set */
3211 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3212 /* update rx_buf, tx_buf and dma */
3213 if (xfers[i].rx_buf)
3214 xfers[i].rx_buf += offset;
3215 if (xfers[i].rx_dma)
3216 xfers[i].rx_dma += offset;
3217 if (xfers[i].tx_buf)
3218 xfers[i].tx_buf += offset;
3219 if (xfers[i].tx_dma)
3220 xfers[i].tx_dma += offset;
3221
3222 /* update length */
3223 xfers[i].len = min(maxsize, xfers[i].len - offset);
3224 }
3225
3226 /* we set up xferp to the last entry we have inserted,
3227 * so that we skip those already split transfers
3228 */
3229 *xferp = &xfers[count - 1];
3230
3231 /* increment statistics counters */
3232 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3233 transfers_split_maxsize);
3234 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3235 transfers_split_maxsize);
3236
3237 return 0;
3238 }
3239
3240 /**
3241 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3242 * when an individual transfer exceeds a
3243 * certain size
3244 * @ctlr: the @spi_controller for this transfer
3245 * @msg: the @spi_message to transform
3246 * @maxsize: the maximum length any individual transfer may have
3247 * @gfp: GFP allocation flags
3248 *
3249 * Return: status of transformation
3250 */
3251 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3252 struct spi_message *msg,
3253 size_t maxsize,
3254 gfp_t gfp)
3255 {
3256 struct spi_transfer *xfer;
3257 int ret;
3258
3259 /* iterate over the transfer_list,
3260 * but note that xfer is advanced to the last transfer inserted
3261 * to avoid checking sizes again unnecessarily (also, xfer may
3262 * potentially belong to a different list by the time the
3263 * replacement has happened).
3264 */
3265 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3266 if (xfer->len > maxsize) {
3267 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3268 maxsize, gfp);
3269 if (ret)
3270 return ret;
3271 }
3272 }
3273
3274 return 0;
3275 }
3276 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
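
/*
 * Usage sketch: a controller whose hardware limits a single transfer
 * to, say, 64 bytes (the limit is an assumption) can split oversized
 * transfers before executing the message:
 *
 *	ret = spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
 */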
3277
3278 /*-------------------------------------------------------------------------*/
3279
3280 /* Core methods for SPI controller protocol drivers. Some of the
3281 * other core methods are currently defined as inline functions.
3282 */
3283
3284 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3285 u8 bits_per_word)
3286 {
3287 if (ctlr->bits_per_word_mask) {
3288 /* Only 32 bits fit in the mask */
3289 if (bits_per_word > 32)
3290 return -EINVAL;
3291 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3292 return -EINVAL;
3293 }
3294
3295 return 0;
3296 }
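
/*
 * Controller drivers advertise the word sizes accepted by the check
 * above through bits_per_word_mask, e.g. (values are assumptions):
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 *
 * or, for discrete sizes:
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 */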
3297
3298 /**
3299 * spi_setup - setup SPI mode and clock rate
3300 * @spi: the device whose settings are being modified
3301 * Context: can sleep, and no requests are queued to the device
3302 *
3303 * SPI protocol drivers may need to update the transfer mode if the
3304 * device doesn't work with its default. They may likewise need
3305 * to update clock rates or word sizes from initial values. This function
3306 * changes those settings, and must be called from a context that can sleep.
3307 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3308 * effect the next time the device is selected and data is transferred to
3309 * or from it. When this function returns, the spi device is deselected.
3310 *
3311 * Note that this call will fail if the protocol driver specifies an option
3312 * that the underlying controller or its driver does not support. For
3313 * example, not all hardware supports wire transfers using nine bit words,
3314 * LSB-first wire encoding, or active-high chipselects.
3315 *
3316 * Return: zero on success, else a negative error code.
3317 */
3318 int spi_setup(struct spi_device *spi)
3319 {
3320 unsigned bad_bits, ugly_bits;
3321 int status;
3322
3323 /* check mode to prevent DUAL and QUAD from being set at the same time
3324 */
3325 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3326 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3327 dev_err(&spi->dev,
3328 "setup: can not select dual and quad at the same time\n");
3329 return -EINVAL;
3330 }
3331 /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
3332 */
3333 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3334 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3335 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3336 return -EINVAL;
3337 /* help drivers fail *cleanly* when they need options
3338 * that aren't supported with their current controller.
3339 * SPI_CS_WORD has a fallback software implementation,
3340 * so it is ignored here.
3341 */
3342 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3343 /* nothing prevents us from working with an active-high CS if it
3344 * is driven by a GPIO.
3345 */
3346 if (gpio_is_valid(spi->cs_gpio))
3347 bad_bits &= ~SPI_CS_HIGH;
3348 ugly_bits = bad_bits &
3349 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3350 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3351 if (ugly_bits) {
3352 dev_warn(&spi->dev,
3353 "setup: ignoring unsupported mode bits %x\n",
3354 ugly_bits);
3355 spi->mode &= ~ugly_bits;
3356 bad_bits &= ~ugly_bits;
3357 }
3358 if (bad_bits) {
3359 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3360 bad_bits);
3361 return -EINVAL;
3362 }
3363
3364 if (!spi->bits_per_word)
3365 spi->bits_per_word = 8;
3366
3367 status = __spi_validate_bits_per_word(spi->controller,
3368 spi->bits_per_word);
3369 if (status)
3370 return status;
3371
3372 if (!spi->max_speed_hz)
3373 spi->max_speed_hz = spi->controller->max_speed_hz;
3374
3375 if (spi->controller->setup)
3376 status = spi->controller->setup(spi);
3377
3378 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3379 status = pm_runtime_get_sync(spi->controller->dev.parent);
3380 if (status < 0) {
3381 pm_runtime_put_noidle(spi->controller->dev.parent);
3382 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3383 status);
3384 return status;
3385 }
3386
3387 /*
3388 * We do not want to return positive value from pm_runtime_get,
3389 * there are many instances of devices calling spi_setup() and
3390 * checking for a non-zero return value instead of a negative
3391 * return value.
3392 */
3393 status = 0;
3394
3395 spi_set_cs(spi, false);
3396 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3397 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3398 } else {
3399 spi_set_cs(spi, false);
3400 }
3401
3402 if (spi->rt && !spi->controller->rt) {
3403 spi->controller->rt = true;
3404 spi_set_thread_rt(spi->controller);
3405 }
3406
3407 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3408 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3409 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3410 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3411 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3412 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3413 spi->bits_per_word, spi->max_speed_hz,
3414 status);
3415
3416 return status;
3417 }
3418 EXPORT_SYMBOL_GPL(spi_setup);
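
/*
 * Client sketch (the mode and rate are assumptions): protocol drivers
 * typically adjust the device in probe() and then call spi_setup():
 *
 *	spi->mode |= SPI_CPOL | SPI_CPHA;	// SPI mode 3
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 10000000;
 *	status = spi_setup(spi);
 */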
3419
3420 /**
3421 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3422 * @spi: the device that requires specific CS timing configuration
3423 * @setup: CS setup time specified via @spi_delay
3424 * @hold: CS hold time specified via @spi_delay
3425 * @inactive: CS inactive delay between transfers specified via @spi_delay
3426 *
3427 * Return: zero on success, else a negative error code.
3428 */
3429 int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
3430 struct spi_delay *hold, struct spi_delay *inactive)
3431 {
3432 size_t len;
3433
3434 if (spi->controller->set_cs_timing)
3435 return spi->controller->set_cs_timing(spi, setup, hold,
3436 inactive);
3437
3438 if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
3439 (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
3440 (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
3441 dev_err(&spi->dev,
3442 "Clock-cycle delays for CS not supported in SW mode\n");
3443 return -ENOTSUPP;
3444 }
3445
3446 len = sizeof(struct spi_delay);
3447
3448 /* copy delays to controller */
3449 if (setup)
3450 memcpy(&spi->controller->cs_setup, setup, len);
3451 else
3452 memset(&spi->controller->cs_setup, 0, len);
3453
3454 if (hold)
3455 memcpy(&spi->controller->cs_hold, hold, len);
3456 else
3457 memset(&spi->controller->cs_hold, 0, len);
3458
3459 if (inactive)
3460 memcpy(&spi->controller->cs_inactive, inactive, len);
3461 else
3462 memset(&spi->controller->cs_inactive, 0, len);
3463
3464 return 0;
3465 }
3466 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
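
/*
 * Timing sketch (the values are assumptions): request 5 us CS setup
 * and hold delays; as noted above, clock-cycle units require a
 * controller that implements ->set_cs_timing:
 *
 *	struct spi_delay setup = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
 *
 *	status = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */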
3467
3468 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3469 struct spi_device *spi)
3470 {
3471 int delay1, delay2;
3472
3473 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3474 if (delay1 < 0)
3475 return delay1;
3476
3477 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3478 if (delay2 < 0)
3479 return delay2;
3480
3481 if (delay1 < delay2)
3482 memcpy(&xfer->word_delay, &spi->word_delay,
3483 sizeof(xfer->word_delay));
3484
3485 return 0;
3486 }
3487
3488 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3489 {
3490 struct spi_controller *ctlr = spi->controller;
3491 struct spi_transfer *xfer;
3492 int w_size;
3493
3494 if (list_empty(&message->transfers))
3495 return -EINVAL;
3496
3497 /* If an SPI controller does not support toggling the CS line on each
3498 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3499 * for the CS line, we can emulate the CS-per-word hardware function by
3500 * splitting transfers into one-word transfers and ensuring that
3501 * cs_change is set for each transfer.
3502 */
3503 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3504 spi->cs_gpiod ||
3505 gpio_is_valid(spi->cs_gpio))) {
3506 size_t maxsize;
3507 int ret;
3508
3509 maxsize = (spi->bits_per_word + 7) / 8;
3510
3511 /* spi_split_transfers_maxsize() requires message->spi */
3512 message->spi = spi;
3513
3514 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3515 GFP_KERNEL);
3516 if (ret)
3517 return ret;
3518
3519 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3520 /* don't change cs_change on the last entry in the list */
3521 if (list_is_last(&xfer->transfer_list, &message->transfers))
3522 break;
3523 xfer->cs_change = 1;
3524 }
3525 }
3526
3527 /* Half-duplex links include the original MicroWire, and ones with
3528 * only one data pin like SPI_3WIRE (switches direction) or where
3529 * either MOSI or MISO is missing. They can also be caused by
3530 * software limitations.
3531 */
3532 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3533 (spi->mode & SPI_3WIRE)) {
3534 unsigned flags = ctlr->flags;
3535
3536 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3537 if (xfer->rx_buf && xfer->tx_buf)
3538 return -EINVAL;
3539 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3540 return -EINVAL;
3541 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3542 return -EINVAL;
3543 }
3544 }
3545
3546 /*
3547 * Set transfer bits_per_word and max speed to the spi device default
3548 * if they are not set for this transfer.
3549 * Set transfer tx_nbits and rx_nbits to the single transfer default
3550 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3551 * Ensure transfer word_delay is at least as long as that required by
3552 * the device itself.
3553 */
3554 message->frame_length = 0;
3555 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3556 xfer->effective_speed_hz = 0;
3557 message->frame_length += xfer->len;
3558 if (!xfer->bits_per_word)
3559 xfer->bits_per_word = spi->bits_per_word;
3560
3561 if (!xfer->speed_hz)
3562 xfer->speed_hz = spi->max_speed_hz;
3563
3564 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3565 xfer->speed_hz = ctlr->max_speed_hz;
3566
3567 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3568 return -EINVAL;
3569
3570 /*
3571 * SPI transfer length should be multiple of SPI word size
3572 * where SPI word size should be power-of-two multiple
3573 */
3574 if (xfer->bits_per_word <= 8)
3575 w_size = 1;
3576 else if (xfer->bits_per_word <= 16)
3577 w_size = 2;
3578 else
3579 w_size = 4;
3580
3581 /* No partial transfers accepted */
3582 if (xfer->len % w_size)
3583 return -EINVAL;
3584
3585 if (xfer->speed_hz && ctlr->min_speed_hz &&
3586 xfer->speed_hz < ctlr->min_speed_hz)
3587 return -EINVAL;
3588
3589 if (xfer->tx_buf && !xfer->tx_nbits)
3590 xfer->tx_nbits = SPI_NBITS_SINGLE;
3591 if (xfer->rx_buf && !xfer->rx_nbits)
3592 xfer->rx_nbits = SPI_NBITS_SINGLE;
3593 /* check transfer tx/rx_nbits:
3594 * 1. check the value matches one of single, dual and quad
3595 * 2. check tx/rx_nbits match the mode in spi_device
3596 */
3597 if (xfer->tx_buf) {
3598 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3599 xfer->tx_nbits != SPI_NBITS_DUAL &&
3600 xfer->tx_nbits != SPI_NBITS_QUAD)
3601 return -EINVAL;
3602 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3603 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3604 return -EINVAL;
3605 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3606 !(spi->mode & SPI_TX_QUAD))
3607 return -EINVAL;
3608 }
3609 /* check transfer rx_nbits */
3610 if (xfer->rx_buf) {
3611 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3612 xfer->rx_nbits != SPI_NBITS_DUAL &&
3613 xfer->rx_nbits != SPI_NBITS_QUAD)
3614 return -EINVAL;
3615 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3616 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3617 return -EINVAL;
3618 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3619 !(spi->mode & SPI_RX_QUAD))
3620 return -EINVAL;
3621 }
3622
3623 if (_spi_xfer_word_delay_update(xfer, spi))
3624 return -EINVAL;
3625 }
3626
3627 message->status = -EINPROGRESS;
3628
3629 return 0;
3630 }
3631
3632 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3633 {
3634 struct spi_controller *ctlr = spi->controller;
3635 struct spi_transfer *xfer;
3636
3637 /*
3638 * Some controllers do not support doing regular SPI transfers. Return
3639 * ENOTSUPP when this is the case.
3640 */
3641 if (!ctlr->transfer)
3642 return -ENOTSUPP;
3643
3644 message->spi = spi;
3645
3646 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3647 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3648
3649 trace_spi_message_submit(message);
3650
3651 if (!ctlr->ptp_sts_supported) {
3652 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3653 xfer->ptp_sts_word_pre = 0;
3654 ptp_read_system_prets(xfer->ptp_sts);
3655 }
3656 }
3657
3658 return ctlr->transfer(spi, message);
3659 }
3660
3661 /**
3662 * spi_async - asynchronous SPI transfer
3663 * @spi: device with which data will be exchanged
3664 * @message: describes the data transfers, including completion callback
3665 * Context: any (irqs may be blocked, etc)
3666 *
3667 * This call may be used in_irq and other contexts which can't sleep,
3668 * as well as from task contexts which can sleep.
3669 *
3670 * The completion callback is invoked in a context which can't sleep.
3671 * Before that invocation, the value of message->status is undefined.
3672 * When the callback is issued, message->status holds either zero (to
3673 * indicate complete success) or a negative error code. After that
3674 * callback returns, the driver which issued the transfer request may
3675 * deallocate the associated memory; it's no longer in use by any SPI
3676 * core or controller driver code.
3677 *
3678 * Note that although all messages to a spi_device are handled in
3679 * FIFO order, messages may go to different devices in other orders.
3680 * Some devices might be higher priority, or have various "hard" access
3681 * time requirements, for example.
3682 *
3683 * On detection of any fault during the transfer, processing of
3684 * the entire message is aborted, and the device is deselected.
3685 * Until returning from the associated message completion callback,
3686 * no other spi_message queued to that device will be processed.
3687 * (This rule applies equally to all the synchronous transfer calls,
3688 * which are wrappers around this core asynchronous primitive.)
3689 *
3690 * Return: zero on success, else a negative error code.
3691 */
3692 int spi_async(struct spi_device *spi, struct spi_message *message)
3693 {
3694 struct spi_controller *ctlr = spi->controller;
3695 int ret;
3696 unsigned long flags;
3697
3698 ret = __spi_validate(spi, message);
3699 if (ret != 0)
3700 return ret;
3701
3702 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3703
3704 if (ctlr->bus_lock_flag)
3705 ret = -EBUSY;
3706 else
3707 ret = __spi_async(spi, message);
3708
3709 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3710
3711 return ret;
3712 }
3713 EXPORT_SYMBOL_GPL(spi_async);
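
/*
 * Submission sketch (illustrative; the "my_"/"done" names are
 * assumptions): spi_async() may be called from atomic context, with
 * completion signalled through the callback:
 *
 *	static void my_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	msg.complete = my_complete;
 *	msg.context = &done;
 *	status = spi_async(spi, &msg);
 */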
3714
3715 /**
3716 * spi_async_locked - version of spi_async with exclusive bus usage
3717 * @spi: device with which data will be exchanged
3718 * @message: describes the data transfers, including completion callback
3719 * Context: any (irqs may be blocked, etc)
3720 *
3721 * This call may be used in_irq and other contexts which can't sleep,
3722 * as well as from task contexts which can sleep.
3723 *
3724 * The completion callback is invoked in a context which can't sleep.
3725 * Before that invocation, the value of message->status is undefined.
3726 * When the callback is issued, message->status holds either zero (to
3727 * indicate complete success) or a negative error code. After that
3728 * callback returns, the driver which issued the transfer request may
3729 * deallocate the associated memory; it's no longer in use by any SPI
3730 * core or controller driver code.
3731 *
3732 * Note that although all messages to a spi_device are handled in
3733 * FIFO order, messages may go to different devices in other orders.
3734 * Some devices might be higher priority, or have various "hard" access
3735 * time requirements, for example.
3736 *
3737 * On detection of any fault during the transfer, processing of
3738 * the entire message is aborted, and the device is deselected.
3739 * Until returning from the associated message completion callback,
3740 * no other spi_message queued to that device will be processed.
3741 * (This rule applies equally to all the synchronous transfer calls,
3742 * which are wrappers around this core asynchronous primitive.)
3743 *
3744 * Return: zero on success, else a negative error code.
3745 */
3746 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3747 {
3748 struct spi_controller *ctlr = spi->controller;
3749 int ret;
3750 unsigned long flags;
3751
3752 ret = __spi_validate(spi, message);
3753 if (ret != 0)
3754 return ret;
3755
3756 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3757
3758 ret = __spi_async(spi, message);
3759
3760 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3761
3762 return ret;
3764 }
3765 EXPORT_SYMBOL_GPL(spi_async_locked);
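
/*
 * spi_async_locked() is intended to be called while the caller holds the
 * bus lock taken by spi_bus_lock(); a sketch (illustrative only: "spi" and
 * "m" are hypothetical, with m.complete/m.context set up as for spi_async()):
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_async_locked(spi, &m);	// plain spi_async() would see -EBUSY
 *	...
 *	spi_bus_unlock(spi->controller);
 */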
3766
3767 /*-------------------------------------------------------------------------*/
3768
3769 /* Utility methods for SPI protocol drivers, layered on
3770 * top of the core. Some other utility methods are defined as
3771 * inline functions.
3772 */
3773
3774 static void spi_complete(void *arg)
3775 {
3776 complete(arg);
3777 }
3778
3779 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3780 {
3781 DECLARE_COMPLETION_ONSTACK(done);
3782 int status;
3783 struct spi_controller *ctlr = spi->controller;
3784 unsigned long flags;
3785
3786 status = __spi_validate(spi, message);
3787 if (status != 0)
3788 return status;
3789
3790 message->complete = spi_complete;
3791 message->context = &done;
3792 message->spi = spi;
3793
3794 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3795 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3796
3797 /* If we're not using the legacy transfer method then we will
3798 * try to transfer in the calling context, so special-case that
3799 * path here. This code would be less tricky if we could remove
3800 * the support for driver-implemented message queues.
3801 */
3802 if (ctlr->transfer == spi_queued_transfer) {
3803 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3804
3805 trace_spi_message_submit(message);
3806
3807 status = __spi_queued_transfer(spi, message, false);
3808
3809 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3810 } else {
3811 status = spi_async_locked(spi, message);
3812 }
3813
3814 if (status == 0) {
3815 /* Push out the messages in the calling context if we
3816 * can.
3817 */
3818 if (ctlr->transfer == spi_queued_transfer) {
3819 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3820 spi_sync_immediate);
3821 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3822 spi_sync_immediate);
3823 __spi_pump_messages(ctlr, false);
3824 }
3825
3826 wait_for_completion(&done);
3827 status = message->status;
3828 }
3829 message->context = NULL;
3830 return status;
3831 }
3832
3833 /**
3834 * spi_sync - blocking/synchronous SPI data transfers
3835 * @spi: device with which data will be exchanged
3836 * @message: describes the data transfers
3837 * Context: can sleep
3838 *
3839 * This call may only be used from a context that may sleep. The sleep
3840 * is non-interruptible, and has no timeout. Low-overhead controller
3841 * drivers may DMA directly into and out of the message buffers.
3842 *
3843 * Note that the SPI device's chip select is active during the message,
3844 * and then is normally disabled between messages. Drivers for some
3845 * frequently-used devices may want to minimize costs of selecting a chip,
3846 * by leaving it selected in anticipation that the next message will go
3847 * to the same chip. (That may increase power usage.)
3848 *
3849 * Also, the caller is guaranteeing that the memory associated with the
3850 * message will not be freed before this call returns.
3851 *
3852 * Return: zero on success, else a negative error code.
3853 */
3854 int spi_sync(struct spi_device *spi, struct spi_message *message)
3855 {
3856 int ret;
3857
3858 mutex_lock(&spi->controller->bus_lock_mutex);
3859 ret = __spi_sync(spi, message);
3860 mutex_unlock(&spi->controller->bus_lock_mutex);
3861
3862 return ret;
3863 }
3864 EXPORT_SYMBOL_GPL(spi_sync);
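
/*
 * A minimal spi_sync() usage sketch (illustrative only: "spi", "tx" and
 * "rx" are hypothetical, not defined in this file):
 *
 *	struct spi_transfer t = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,	// full duplex: rx is filled while tx shifts out
 *		.len = sizeof(tx),
 *	};
 *	struct spi_message m;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&m, &t, 1);
 *	ret = spi_sync(spi, &m);	// sleeps until the message completes
 */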
3865
3866 /**
3867 * spi_sync_locked - version of spi_sync with exclusive bus usage
3868 * @spi: device with which data will be exchanged
3869 * @message: describes the data transfers
3870 * Context: can sleep
3871 *
3872 * This call may only be used from a context that may sleep. The sleep
3873 * is non-interruptible, and has no timeout. Low-overhead controller
3874 * drivers may DMA directly into and out of the message buffers.
3875 *
3876 * This call should be used by drivers that require exclusive access to the
3877 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3878 * be released by a spi_bus_unlock call when the exclusive access is over.
3879 *
3880 * Return: zero on success, else a negative error code.
3881 */
3882 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3883 {
3884 return __spi_sync(spi, message);
3885 }
3886 EXPORT_SYMBOL_GPL(spi_sync_locked);
3887
3888 /**
3889 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3890 * @ctlr: SPI bus master that should be locked for exclusive bus access
3891 * Context: can sleep
3892 *
3893 * This call may only be used from a context that may sleep. The sleep
3894 * is non-interruptible, and has no timeout.
3895 *
3896 * This call should be used by drivers that require exclusive access to the
3897 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3898 * exclusive access is over. Data transfer must be done by spi_sync_locked
3899 * and spi_async_locked calls when the SPI bus lock is held.
3900 *
3901 * Return: always zero.
3902 */
3903 int spi_bus_lock(struct spi_controller *ctlr)
3904 {
3905 unsigned long flags;
3906
3907 mutex_lock(&ctlr->bus_lock_mutex);
3908
3909 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3910 ctlr->bus_lock_flag = 1;
3911 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3912
3913 /* mutex remains locked until spi_bus_unlock is called */
3914
3915 return 0;
3916 }
3917 EXPORT_SYMBOL_GPL(spi_bus_lock);
3918
3919 /**
3920 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3921 * @ctlr: SPI bus master that was locked for exclusive bus access
3922 * Context: can sleep
3923 *
3924 * This call may only be used from a context that may sleep. The sleep
3925 * is non-interruptible, and has no timeout.
3926 *
3927 * This call releases an SPI bus lock previously obtained by a spi_bus_lock
3928 * call.
3929 *
3930 * Return: always zero.
3931 */
3932 int spi_bus_unlock(struct spi_controller *ctlr)
3933 {
3934 ctlr->bus_lock_flag = 0;
3935
3936 mutex_unlock(&ctlr->bus_lock_mutex);
3937
3938 return 0;
3939 }
3940 EXPORT_SYMBOL_GPL(spi_bus_unlock);
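
/*
 * Putting the locking calls together, the exclusive-access pattern looks
 * like this (illustrative sketch only: "spi", "first" and "second" are
 * hypothetical messages):
 *
 *	struct spi_controller *ctlr = spi->controller;
 *	int ret;
 *
 *	spi_bus_lock(ctlr);	// spi_sync() callers block, spi_async() gets -EBUSY
 *	ret = spi_sync_locked(spi, &first);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second);	// no other device slips in between
 *	spi_bus_unlock(ctlr);	// always release, even on error
 */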
3941
3942 /* portable code must never pass more than 32 bytes */
3943 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
3944
3945 static u8 *buf;
3946
3947 /**
3948 * spi_write_then_read - SPI synchronous write followed by read
3949 * @spi: device with which data will be exchanged
3950 * @txbuf: data to be written (need not be dma-safe)
3951 * @n_tx: size of txbuf, in bytes
3952 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3953 * @n_rx: size of rxbuf, in bytes
3954 * Context: can sleep
3955 *
3956 * This performs a half duplex MicroWire style transaction with the
3957 * device, sending txbuf and then reading rxbuf. The return value
3958 * is zero for success, else a negative errno status code.
3959 * This call may only be used from a context that may sleep.
3960 *
3961 * Parameters to this routine are always copied using a small buffer.
3962 * Performance-sensitive or bulk transfer code should instead use
3963 * spi_{async,sync}() calls with dma-safe buffers.
3964 *
3965 * Return: zero on success, else a negative error code.
3966 */
3967 int spi_write_then_read(struct spi_device *spi,
3968 const void *txbuf, unsigned n_tx,
3969 void *rxbuf, unsigned n_rx)
3970 {
3971 static DEFINE_MUTEX(lock);
3972
3973 int status;
3974 struct spi_message message;
3975 struct spi_transfer x[2];
3976 u8 *local_buf;
3977
3978 /* Use the preallocated DMA-safe buffer if we can. We can't avoid
3979 * copying here (this is purely a convenience API), but we can
3980 * keep heap costs out of the hot path unless someone else is
3981 * using the preallocated buffer or the transfer is too large.
3982 */
3983 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3984 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3985 GFP_KERNEL | GFP_DMA);
3986 if (!local_buf)
3987 return -ENOMEM;
3988 } else {
3989 local_buf = buf;
3990 }
3991
3992 spi_message_init(&message);
3993 memset(x, 0, sizeof(x));
3994 if (n_tx) {
3995 x[0].len = n_tx;
3996 spi_message_add_tail(&x[0], &message);
3997 }
3998 if (n_rx) {
3999 x[1].len = n_rx;
4000 spi_message_add_tail(&x[1], &message);
4001 }
4002
4003 memcpy(local_buf, txbuf, n_tx);
4004 x[0].tx_buf = local_buf;
4005 x[1].rx_buf = local_buf + n_tx;
4006
4007 /* do the i/o */
4008 status = spi_sync(spi, &message);
4009 if (status == 0)
4010 memcpy(rxbuf, x[1].rx_buf, n_rx);
4011
4012 if (x[0].tx_buf == buf)
4013 mutex_unlock(&lock);
4014 else
4015 kfree(local_buf);
4016
4017 return status;
4018 }
4019 EXPORT_SYMBOL_GPL(spi_write_then_read);
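
/*
 * Typical spi_write_then_read() use is a register read: write the address,
 * then read the value back (illustrative only: MY_REG and MY_READ_BIT are
 * made up for this sketch):
 *
 *	u8 addr = MY_REG | MY_READ_BIT;
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &addr, 1, &val, 1);
 *	// on success "val" holds the register contents; because the data is
 *	// bounced through a DMA-safe buffer, addr/val may live on the stack
 */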
4020
4021 /*-------------------------------------------------------------------------*/
4022
4023 #if IS_ENABLED(CONFIG_OF)
4024 /* must call put_device() when done with the returned spi_device */
4025 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4026 {
4027 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4028
4029 return dev ? to_spi_device(dev) : NULL;
4030 }
4031 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
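
/*
 * For example (sketch only; "np" is a hypothetical struct device_node *):
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		// ... use the device ...
 *		put_device(&spi->dev);	// drop the reference taken by the find
 *	}
 */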
4032 #endif /* IS_ENABLED(CONFIG_OF) */
4033
4034 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4035 /* the spi controllers are not using spi_bus, so we find them another way */
4036 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4037 {
4038 struct device *dev;
4039
4040 dev = class_find_device_by_of_node(&spi_master_class, node);
4041 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4042 dev = class_find_device_by_of_node(&spi_slave_class, node);
4043 if (!dev)
4044 return NULL;
4045
4046 /* reference obtained in class_find_device */
4047 return container_of(dev, struct spi_controller, dev);
4048 }
4049
4050 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4051 void *arg)
4052 {
4053 struct of_reconfig_data *rd = arg;
4054 struct spi_controller *ctlr;
4055 struct spi_device *spi;
4056
4057 switch (of_reconfig_get_state_change(action, arg)) {
4058 case OF_RECONFIG_CHANGE_ADD:
4059 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4060 if (ctlr == NULL)
4061 return NOTIFY_OK; /* not for us */
4062
4063 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4064 put_device(&ctlr->dev);
4065 return NOTIFY_OK;
4066 }
4067
4068 spi = of_register_spi_device(ctlr, rd->dn);
4069 put_device(&ctlr->dev);
4070
4071 if (IS_ERR(spi)) {
4072 pr_err("%s: failed to create for '%pOF'\n",
4073 __func__, rd->dn);
4074 of_node_clear_flag(rd->dn, OF_POPULATED);
4075 return notifier_from_errno(PTR_ERR(spi));
4076 }
4077 break;
4078
4079 case OF_RECONFIG_CHANGE_REMOVE:
4080 /* already depopulated? */
4081 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4082 return NOTIFY_OK;
4083
4084 /* find our device by node */
4085 spi = of_find_spi_device_by_node(rd->dn);
4086 if (spi == NULL)
4087 return NOTIFY_OK; /* no? not meant for us */
4088
4089 /* unregister takes one ref away */
4090 spi_unregister_device(spi);
4091
4092 /* and drop the reference taken by the find */
4093 put_device(&spi->dev);
4094 break;
4095 }
4096
4097 return NOTIFY_OK;
4098 }
4099
4100 static struct notifier_block spi_of_notifier = {
4101 .notifier_call = of_spi_notify,
4102 };
4103 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4104 extern struct notifier_block spi_of_notifier;
4105 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4106
4107 #if IS_ENABLED(CONFIG_ACPI)
4108 static int spi_acpi_controller_match(struct device *dev, const void *data)
4109 {
4110 return ACPI_COMPANION(dev->parent) == data;
4111 }
4112
4113 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4114 {
4115 struct device *dev;
4116
4117 dev = class_find_device(&spi_master_class, NULL, adev,
4118 spi_acpi_controller_match);
4119 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4120 dev = class_find_device(&spi_slave_class, NULL, adev,
4121 spi_acpi_controller_match);
4122 if (!dev)
4123 return NULL;
4124
4125 return container_of(dev, struct spi_controller, dev);
4126 }
4127
4128 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4129 {
4130 struct device *dev;
4131
4132 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4133 return to_spi_device(dev);
4134 }
4135
4136 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4137 void *arg)
4138 {
4139 struct acpi_device *adev = arg;
4140 struct spi_controller *ctlr;
4141 struct spi_device *spi;
4142
4143 switch (value) {
4144 case ACPI_RECONFIG_DEVICE_ADD:
4145 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4146 if (!ctlr)
4147 break;
4148
4149 acpi_register_spi_device(ctlr, adev);
4150 put_device(&ctlr->dev);
4151 break;
4152 case ACPI_RECONFIG_DEVICE_REMOVE:
4153 if (!acpi_device_enumerated(adev))
4154 break;
4155
4156 spi = acpi_spi_find_device_by_adev(adev);
4157 if (!spi)
4158 break;
4159
4160 spi_unregister_device(spi);
4161 put_device(&spi->dev);
4162 break;
4163 }
4164
4165 return NOTIFY_OK;
4166 }
4167
4168 static struct notifier_block spi_acpi_notifier = {
4169 .notifier_call = acpi_spi_notify,
4170 };
4171 #else
4172 extern struct notifier_block spi_acpi_notifier;
4173 #endif
4174
4175 static int __init spi_init(void)
4176 {
4177 int status;
4178
4179 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4180 if (!buf) {
4181 status = -ENOMEM;
4182 goto err0;
4183 }
4184
4185 status = bus_register(&spi_bus_type);
4186 if (status < 0)
4187 goto err1;
4188
4189 status = class_register(&spi_master_class);
4190 if (status < 0)
4191 goto err2;
4192
4193 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4194 status = class_register(&spi_slave_class);
4195 if (status < 0)
4196 goto err3;
4197 }
4198
4199 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4200 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4201 if (IS_ENABLED(CONFIG_ACPI))
4202 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4203
4204 return 0;
4205
4206 err3:
4207 class_unregister(&spi_master_class);
4208 err2:
4209 bus_unregister(&spi_bus_type);
4210 err1:
4211 kfree(buf);
4212 buf = NULL;
4213 err0:
4214 return status;
4215 }
4216
4217 /* board_info is normally registered in arch_initcall(),
4218 * but even essential drivers wait till later.
4219 *
4220 * REVISIT only boardinfo really needs static linking. The rest (device and
4221 * driver registration) _could_ be dynamically linked (modular) ... costs
4222 * include needing to have boardinfo data structures be much more public.
4223 */
4224 postcore_initcall(spi_init);