1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * platform.c - platform 'pseudo' bus for legacy devices
4 *
5 * Copyright (c) 2002-3 Patrick Mochel
6 * Copyright (c) 2002-3 Open Source Development Labs
7 *
8 * Please see Documentation/driver-api/driver-model/platform.rst for more
9 * information.
10 */
11
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/idr.h>
25 #include <linux/acpi.h>
26 #include <linux/clk/clk-conf.h>
27 #include <linux/limits.h>
28 #include <linux/property.h>
29 #include <linux/kmemleak.h>
30
31 #include "base.h"
32 #include "power/power.h"
33
34 /* For automatically allocated device IDs */
35 static DEFINE_IDA(platform_devid_ida);
36
37 struct device platform_bus = {
38 .init_name = "platform",
39 };
40 EXPORT_SYMBOL_GPL(platform_bus);
41
42 /**
43 * platform_get_resource - get a resource for a device
44 * @dev: platform device
45 * @type: resource type
46 * @num: resource index
47 */
48 struct resource *platform_get_resource(struct platform_device *dev,
49 unsigned int type, unsigned int num)
50 {
51 int i;
52
53 for (i = 0; i < dev->num_resources; i++) {
54 struct resource *r = &dev->resource[i];
55
56 if (type == resource_type(r) && num-- == 0)
57 return r;
58 }
59 return NULL;
60 }
61 EXPORT_SYMBOL_GPL(platform_get_resource);
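
/*
 * Usage sketch (hypothetical driver code, not part of this file): a probe()
 * routine typically fetches its register window by type and index before
 * mapping it.  The foo_* names are placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		dev_info(&pdev->dev, "registers at %pR\n", res);
 *		return 0;
 *	}
 */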
62
63 #ifdef CONFIG_HAS_IOMEM
64 /**
65 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
66 * device
67 *
68 * @pdev: platform device to use both for memory resource lookup as well as
69 * resource management
70 * @index: resource index
71 */
72 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
73 unsigned int index)
74 {
75 struct resource *res;
76
77 res = platform_get_resource(pdev, IORESOURCE_MEM, index);
78 return devm_ioremap_resource(&pdev->dev, res);
79 }
80 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
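
/*
 * Usage sketch (hypothetical driver code): the managed helper combines the
 * resource lookup above with devm_ioremap_resource(), so the mapping is
 * released automatically on driver detach.  FOO_CTRL_REG is a placeholder.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		writel(1, base + FOO_CTRL_REG);
 *		return 0;
 *	}
 */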
81
82 /**
83 * devm_platform_ioremap_resource_wc - write-combined variant of
84 * devm_platform_ioremap_resource()
85 *
86 * @pdev: platform device to use both for memory resource lookup as well as
87 * resource management
88 * @index: resource index
89 */
90 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
91 unsigned int index)
92 {
93 struct resource *res;
94
95 res = platform_get_resource(pdev, IORESOURCE_MEM, index);
96 return devm_ioremap_resource_wc(&pdev->dev, res);
97 }
98
99 /**
100 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
101 * a platform device, retrieve the
102 * resource by name
103 *
104 * @pdev: platform device to use both for memory resource lookup as well as
105 * resource management
106 * @name: name of the resource
107 */
108 void __iomem *
109 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
110 const char *name)
111 {
112 struct resource *res;
113
114 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
115 return devm_ioremap_resource(&pdev->dev, res);
116 }
117 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
118 #endif /* CONFIG_HAS_IOMEM */
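
/*
 * Usage sketch (hypothetical driver code): the _byname() variant is handy
 * when a binding names its register regions (e.g. via reg-names in DT); the
 * "ctrl" name below is an assumed binding name.
 *
 *	void __iomem *base;
 *
 *	base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */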
119
120 /**
121 * platform_get_irq_optional - get an optional IRQ for a device
122 * @dev: platform device
123 * @num: IRQ number index
124 *
125  * Gets an IRQ for a platform device. Device drivers should check the return
126  * value for errors so as not to pass a negative integer value to the
127  * request_irq() APIs. This is the same as platform_get_irq(), except that it
128  * does not print an error message if an IRQ cannot be obtained.
129 *
130 * Example:
131 * int irq = platform_get_irq_optional(pdev, 0);
132 * if (irq < 0)
133 * return irq;
134 *
135 * Return: IRQ number on success, negative error number on failure.
136 */
137 int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
138 {
139 #ifdef CONFIG_SPARC
140 /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
141 if (!dev || num >= dev->archdata.num_irqs)
142 return -ENXIO;
143 return dev->archdata.irqs[num];
144 #else
145 struct resource *r;
146 int ret;
147
148 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
149 ret = of_irq_get(dev->dev.of_node, num);
150 if (ret > 0 || ret == -EPROBE_DEFER)
151 return ret;
152 }
153
154 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
155 if (has_acpi_companion(&dev->dev)) {
156 if (r && r->flags & IORESOURCE_DISABLED) {
157 ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
158 if (ret)
159 return ret;
160 }
161 }
162
163 /*
164 * The resources may pass trigger flags to the irqs that need
165 * to be set up. It so happens that the trigger flags for
166 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
167 * settings.
168 */
169 if (r && r->flags & IORESOURCE_BITS) {
170 struct irq_data *irqd;
171
172 irqd = irq_get_irq_data(r->start);
173 if (!irqd)
174 return -ENXIO;
175 irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
176 }
177
178 if (r)
179 return r->start;
180
181 /*
182 * For the index 0 interrupt, allow falling back to GpioInt
183 * resources. While a device could have both Interrupt and GpioInt
184 * resources, making this fallback ambiguous, in many common cases
185 * the device will only expose one IRQ, and this fallback
186 * allows a common code path across either kind of resource.
187 */
188 if (num == 0 && has_acpi_companion(&dev->dev)) {
189 ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
190 /* Our callers expect -ENXIO for missing IRQs. */
191 if (ret >= 0 || ret == -EPROBE_DEFER)
192 return ret;
193 }
194
195 return -ENXIO;
196 #endif
197 }
198 EXPORT_SYMBOL_GPL(platform_get_irq_optional);
199
200 /**
201 * platform_get_irq - get an IRQ for a device
202 * @dev: platform device
203 * @num: IRQ number index
204 *
205 * Gets an IRQ for a platform device and prints an error message if finding the
206 * IRQ fails. Device drivers should check the return value for errors so as to
207 * not pass a negative integer value to the request_irq() APIs.
208 *
209 * Example:
210 * int irq = platform_get_irq(pdev, 0);
211 * if (irq < 0)
212 * return irq;
213 *
214 * Return: IRQ number on success, negative error number on failure.
215 */
216 int platform_get_irq(struct platform_device *dev, unsigned int num)
217 {
218 int ret;
219
220 ret = platform_get_irq_optional(dev, num);
221 if (ret < 0 && ret != -EPROBE_DEFER)
222 dev_err(&dev->dev, "IRQ index %u not found\n", num);
223
224 return ret;
225 }
226 EXPORT_SYMBOL_GPL(platform_get_irq);
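
/*
 * Usage sketch (hypothetical driver code): the returned number is normally
 * passed straight to one of the request_irq() helpers from
 * <linux/interrupt.h>.  foo_irq_handler is a placeholder handler.
 *
 *	int irq, ret;
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *
 *	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
 *			       dev_name(&pdev->dev), pdev);
 *	if (ret)
 *		return ret;
 */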
227
228 /**
229 * platform_irq_count - Count the number of IRQs a platform device uses
230 * @dev: platform device
231 *
232  * Return: Number of IRQs a platform device uses, or -EPROBE_DEFER on probe deferral
233 */
234 int platform_irq_count(struct platform_device *dev)
235 {
236 int ret, nr = 0;
237
238 while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
239 nr++;
240
241 if (ret == -EPROBE_DEFER)
242 return ret;
243
244 return nr;
245 }
246 EXPORT_SYMBOL_GPL(platform_irq_count);
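
/*
 * Usage sketch (hypothetical driver code): drivers with a variable number of
 * interrupt lines can size their per-IRQ state from this count before
 * requesting each line.
 *
 *	int i, nirq = platform_irq_count(pdev);
 *
 *	if (nirq < 0)
 *		return nirq;
 *
 *	for (i = 0; i < nirq; i++) {
 *		int irq = platform_get_irq(pdev, i);
 *		...
 *	}
 */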
247
248 /**
249 * platform_get_resource_byname - get a resource for a device by name
250 * @dev: platform device
251 * @type: resource type
252 * @name: resource name
253 */
254 struct resource *platform_get_resource_byname(struct platform_device *dev,
255 unsigned int type,
256 const char *name)
257 {
258 int i;
259
260 for (i = 0; i < dev->num_resources; i++) {
261 struct resource *r = &dev->resource[i];
262
263 if (unlikely(!r->name))
264 continue;
265
266 if (type == resource_type(r) && !strcmp(r->name, name))
267 return r;
268 }
269 return NULL;
270 }
271 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
272
273 static int __platform_get_irq_byname(struct platform_device *dev,
274 const char *name)
275 {
276 struct resource *r;
277 int ret;
278
279 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
280 ret = of_irq_get_byname(dev->dev.of_node, name);
281 if (ret > 0 || ret == -EPROBE_DEFER)
282 return ret;
283 }
284
285 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
286 if (r)
287 return r->start;
288
289 return -ENXIO;
290 }
291
292 /**
293 * platform_get_irq_byname - get an IRQ for a device by name
294 * @dev: platform device
295 * @name: IRQ name
296 *
297  * Get an IRQ like platform_get_irq(), but by name rather than by index.
298 *
299 * Return: IRQ number on success, negative error number on failure.
300 */
301 int platform_get_irq_byname(struct platform_device *dev, const char *name)
302 {
303 int ret;
304
305 ret = __platform_get_irq_byname(dev, name);
306 if (ret < 0 && ret != -EPROBE_DEFER)
307 dev_err(&dev->dev, "IRQ %s not found\n", name);
308
309 return ret;
310 }
311 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
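
/*
 * Usage sketch (hypothetical driver code): with a binding that provides
 * interrupt-names, individual lines are looked up by those names; "tx" below
 * is an assumed binding name.
 *
 *	int tx_irq = platform_get_irq_byname(pdev, "tx");
 *
 *	if (tx_irq < 0)
 *		return tx_irq;
 */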
312
313 /**
314 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
315 * @dev: platform device
316 * @name: IRQ name
317 *
318  * Get an optional IRQ by name like platform_get_irq_byname(), except that it
319  * does not print an error message if the IRQ cannot be obtained.
320 *
321 * Return: IRQ number on success, negative error number on failure.
322 */
323 int platform_get_irq_byname_optional(struct platform_device *dev,
324 const char *name)
325 {
326 return __platform_get_irq_byname(dev, name);
327 }
328 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
329
330 /**
331  * platform_add_devices - add a number of platform devices
332 * @devs: array of platform devices to add
333 * @num: number of platform devices in array
334 */
335 int platform_add_devices(struct platform_device **devs, int num)
336 {
337 int i, ret = 0;
338
339 for (i = 0; i < num; i++) {
340 ret = platform_device_register(devs[i]);
341 if (ret) {
342 while (--i >= 0)
343 platform_device_unregister(devs[i]);
344 break;
345 }
346 }
347
348 return ret;
349 }
350 EXPORT_SYMBOL_GPL(platform_add_devices);
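
/*
 * Usage sketch (hypothetical board code): legacy board files typically build
 * an array of statically defined devices and register them in one call; the
 * foo_device/bar_device entries are placeholders.
 *
 *	static struct platform_device *board_devices[] __initdata = {
 *		&foo_device,
 *		&bar_device,
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return platform_add_devices(board_devices,
 *					    ARRAY_SIZE(board_devices));
 *	}
 */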
351
352 struct platform_object {
353 struct platform_device pdev;
354 char name[];
355 };
356
357 /*
358  * Set up default DMA mask for platform devices if they weren't
359 * previously set by the architecture / DT.
360 */
361 static void setup_pdev_dma_masks(struct platform_device *pdev)
362 {
363 if (!pdev->dev.coherent_dma_mask)
364 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
365 if (!pdev->dma_mask)
366 pdev->dma_mask = DMA_BIT_MASK(32);
367 if (!pdev->dev.dma_mask)
368 pdev->dev.dma_mask = &pdev->dma_mask;
369 };
370
371 /**
372 * platform_device_put - destroy a platform device
373 * @pdev: platform device to free
374 *
375 * Free all memory associated with a platform device. This function must
376 * _only_ be externally called in error cases. All other usage is a bug.
377 */
378 void platform_device_put(struct platform_device *pdev)
379 {
380 if (!IS_ERR_OR_NULL(pdev))
381 put_device(&pdev->dev);
382 }
383 EXPORT_SYMBOL_GPL(platform_device_put);
384
385 static void platform_device_release(struct device *dev)
386 {
387 struct platform_object *pa = container_of(dev, struct platform_object,
388 pdev.dev);
389
390 of_device_node_put(&pa->pdev.dev);
391 kfree(pa->pdev.dev.platform_data);
392 kfree(pa->pdev.mfd_cell);
393 kfree(pa->pdev.resource);
394 kfree(pa->pdev.driver_override);
395 kfree(pa);
396 }
397
398 /**
399 * platform_device_alloc - create a platform device
400 * @name: base name of the device we're adding
401 * @id: instance id
402 *
403 * Create a platform device object which can have other objects attached
404 * to it, and which will have attached objects freed when it is released.
405 */
406 struct platform_device *platform_device_alloc(const char *name, int id)
407 {
408 struct platform_object *pa;
409
410 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
411 if (pa) {
412 strcpy(pa->name, name);
413 pa->pdev.name = pa->name;
414 pa->pdev.id = id;
415 device_initialize(&pa->pdev.dev);
416 pa->pdev.dev.release = platform_device_release;
417 setup_pdev_dma_masks(&pa->pdev);
418 }
419
420 return pa ? &pa->pdev : NULL;
421 }
422 EXPORT_SYMBOL_GPL(platform_device_alloc);
423
424 /**
425 * platform_device_add_resources - add resources to a platform device
426 * @pdev: platform device allocated by platform_device_alloc to add resources to
427 * @res: set of resources that needs to be allocated for the device
428 * @num: number of resources
429 *
430 * Add a copy of the resources to the platform device. The memory
431 * associated with the resources will be freed when the platform device is
432 * released.
433 */
434 int platform_device_add_resources(struct platform_device *pdev,
435 const struct resource *res, unsigned int num)
436 {
437 struct resource *r = NULL;
438
439 if (res) {
440 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
441 if (!r)
442 return -ENOMEM;
443 }
444
445 kfree(pdev->resource);
446 pdev->resource = r;
447 pdev->num_resources = num;
448 return 0;
449 }
450 EXPORT_SYMBOL_GPL(platform_device_add_resources);
451
452 /**
453 * platform_device_add_data - add platform-specific data to a platform device
454  * @pdev: platform device allocated by platform_device_alloc to add data to
455 * @data: platform specific data for this platform device
456 * @size: size of platform specific data
457 *
458 * Add a copy of platform specific data to the platform device's
459 * platform_data pointer. The memory associated with the platform data
460 * will be freed when the platform device is released.
461 */
462 int platform_device_add_data(struct platform_device *pdev, const void *data,
463 size_t size)
464 {
465 void *d = NULL;
466
467 if (data) {
468 d = kmemdup(data, size, GFP_KERNEL);
469 if (!d)
470 return -ENOMEM;
471 }
472
473 kfree(pdev->dev.platform_data);
474 pdev->dev.platform_data = d;
475 return 0;
476 }
477 EXPORT_SYMBOL_GPL(platform_device_add_data);
478
479 /**
480 * platform_device_add_properties - add built-in properties to a platform device
481 * @pdev: platform device to add properties to
482 * @properties: null terminated array of properties to add
483 *
484  * The function will take a deep copy of @properties and attach the copy to the
485  * platform device. The memory associated with the properties will be freed
486  * when the platform device is released.
487 */
488 int platform_device_add_properties(struct platform_device *pdev,
489 const struct property_entry *properties)
490 {
491 return device_add_properties(&pdev->dev, properties);
492 }
493 EXPORT_SYMBOL_GPL(platform_device_add_properties);
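
/*
 * Usage sketch (hypothetical caller): properties are built with the
 * PROPERTY_ENTRY_*() helpers from <linux/property.h> and the array must end
 * with an empty entry; the "foo," property names are made up.
 *
 *	static const struct property_entry foo_properties[] = {
 *		PROPERTY_ENTRY_BOOL("foo,enable-quirk"),
 *		PROPERTY_ENTRY_U32("foo,clock-frequency", 400000),
 *		{ }
 *	};
 *
 *	ret = platform_device_add_properties(pdev, foo_properties);
 *	if (ret)
 *		goto err_put;
 */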
494
495 /**
496 * platform_device_add - add a platform device to device hierarchy
497 * @pdev: platform device we're adding
498 *
499  * This is part 2 of platform_device_register(), though it may be called
500  * separately _iff_ pdev was allocated by platform_device_alloc().
501 */
502 int platform_device_add(struct platform_device *pdev)
503 {
504 int i, ret;
505
506 if (!pdev)
507 return -EINVAL;
508
509 if (!pdev->dev.parent)
510 pdev->dev.parent = &platform_bus;
511
512 pdev->dev.bus = &platform_bus_type;
513
514 switch (pdev->id) {
515 default:
516 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
517 break;
518 case PLATFORM_DEVID_NONE:
519 dev_set_name(&pdev->dev, "%s", pdev->name);
520 break;
521 case PLATFORM_DEVID_AUTO:
522 /*
523 * Automatically allocated device ID. We mark it as such so
524 * that we remember it must be freed, and we append a suffix
525 * to avoid namespace collision with explicit IDs.
526 */
527 ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
528 if (ret < 0)
529 goto err_out;
530 pdev->id = ret;
531 pdev->id_auto = true;
532 dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
533 break;
534 }
535
536 for (i = 0; i < pdev->num_resources; i++) {
537 struct resource *p, *r = &pdev->resource[i];
538
539 if (r->name == NULL)
540 r->name = dev_name(&pdev->dev);
541
542 p = r->parent;
543 if (!p) {
544 if (resource_type(r) == IORESOURCE_MEM)
545 p = &iomem_resource;
546 else if (resource_type(r) == IORESOURCE_IO)
547 p = &ioport_resource;
548 }
549
550 if (p) {
551 ret = insert_resource(p, r);
552 if (ret) {
553 dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
554 goto failed;
555 }
556 }
557 }
558
559 pr_debug("Registering platform device '%s'. Parent at %s\n",
560 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
561
562 ret = device_add(&pdev->dev);
563 if (ret == 0)
564 return ret;
565
566 failed:
567 if (pdev->id_auto) {
568 ida_simple_remove(&platform_devid_ida, pdev->id);
569 pdev->id = PLATFORM_DEVID_AUTO;
570 }
571
572 while (--i >= 0) {
573 struct resource *r = &pdev->resource[i];
574 if (r->parent)
575 release_resource(r);
576 }
577
578 err_out:
579 return ret;
580 }
581 EXPORT_SYMBOL_GPL(platform_device_add);
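
/*
 * Usage sketch (hypothetical caller): the usual two-step registration built
 * from the helpers above; foo_resources and foo_pdata are placeholders.
 * Note that a device which has not been added yet is disposed of with
 * platform_device_put(), not platform_device_unregister().
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add(pdev);
 *	if (ret)
 *		goto err_put;
 *
 *	return 0;
 *
 * err_put:
 *	platform_device_put(pdev);
 *	return ret;
 */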
582
583 /**
584 * platform_device_del - remove a platform-level device
585 * @pdev: platform device we're removing
586 *
587 * Note that this function will also release all memory- and port-based
588  * resources owned by the device (@pdev->resource). This function must
589 * _only_ be externally called in error cases. All other usage is a bug.
590 */
591 void platform_device_del(struct platform_device *pdev)
592 {
593 int i;
594
595 if (!IS_ERR_OR_NULL(pdev)) {
596 device_del(&pdev->dev);
597
598 if (pdev->id_auto) {
599 ida_simple_remove(&platform_devid_ida, pdev->id);
600 pdev->id = PLATFORM_DEVID_AUTO;
601 }
602
603 for (i = 0; i < pdev->num_resources; i++) {
604 struct resource *r = &pdev->resource[i];
605 if (r->parent)
606 release_resource(r);
607 }
608 }
609 }
610 EXPORT_SYMBOL_GPL(platform_device_del);
611
612 /**
613 * platform_device_register - add a platform-level device
614 * @pdev: platform device we're adding
615 */
616 int platform_device_register(struct platform_device *pdev)
617 {
618 device_initialize(&pdev->dev);
619 setup_pdev_dma_masks(pdev);
620 return platform_device_add(pdev);
621 }
622 EXPORT_SYMBOL_GPL(platform_device_register);
623
624 /**
625 * platform_device_unregister - unregister a platform-level device
626 * @pdev: platform device we're unregistering
627 *
628  * Unregistration is done in 2 steps. First we release all resources
629  * and remove the device from the subsystem, then we drop the reference
630  * count by calling platform_device_put().
631 */
632 void platform_device_unregister(struct platform_device *pdev)
633 {
634 platform_device_del(pdev);
635 platform_device_put(pdev);
636 }
637 EXPORT_SYMBOL_GPL(platform_device_unregister);
638
639 /**
640 * platform_device_register_full - add a platform-level device with
641 * resources and platform-specific data
642 *
643 * @pdevinfo: data used to create device
644 *
645 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
646 */
647 struct platform_device *platform_device_register_full(
648 const struct platform_device_info *pdevinfo)
649 {
650 int ret = -ENOMEM;
651 struct platform_device *pdev;
652
653 pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
654 if (!pdev)
655 return ERR_PTR(-ENOMEM);
656
657 pdev->dev.parent = pdevinfo->parent;
658 pdev->dev.fwnode = pdevinfo->fwnode;
659 pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
660 pdev->dev.of_node_reused = pdevinfo->of_node_reused;
661
662 if (pdevinfo->dma_mask) {
663 /*
664 * This memory isn't freed when the device is put,
665 * I don't have a nice idea for that though. Conceptually
666 * dma_mask in struct device should not be a pointer.
667 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
668 */
669 pdev->dev.dma_mask =
670 kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
671 if (!pdev->dev.dma_mask)
672 goto err;
673
674 kmemleak_ignore(pdev->dev.dma_mask);
675
676 *pdev->dev.dma_mask = pdevinfo->dma_mask;
677 pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
678 }
679
680 ret = platform_device_add_resources(pdev,
681 pdevinfo->res, pdevinfo->num_res);
682 if (ret)
683 goto err;
684
685 ret = platform_device_add_data(pdev,
686 pdevinfo->data, pdevinfo->size_data);
687 if (ret)
688 goto err;
689
690 if (pdevinfo->properties) {
691 ret = platform_device_add_properties(pdev,
692 pdevinfo->properties);
693 if (ret)
694 goto err;
695 }
696
697 ret = platform_device_add(pdev);
698 if (ret) {
699 err:
700 ACPI_COMPANION_SET(&pdev->dev, NULL);
701 kfree(pdev->dev.dma_mask);
702 platform_device_put(pdev);
703 return ERR_PTR(ret);
704 }
705
706 return pdev;
707 }
708 EXPORT_SYMBOL_GPL(platform_device_register_full);
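
/*
 * Usage sketch (hypothetical caller): the _full() variant takes one
 * descriptor, which is convenient when resources, platform data and a DMA
 * mask all need to be supplied together; the foo_* names are placeholders.
 *
 *	struct platform_device_info pdevinfo = {
 *		.parent		= &foo_parent_dev,
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_resources,
 *		.num_res	= ARRAY_SIZE(foo_resources),
 *		.data		= &foo_pdata,
 *		.size_data	= sizeof(foo_pdata),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */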
709
710 static int platform_drv_probe(struct device *_dev)
711 {
712 struct platform_driver *drv = to_platform_driver(_dev->driver);
713 struct platform_device *dev = to_platform_device(_dev);
714 int ret;
715
716 ret = of_clk_set_defaults(_dev->of_node, false);
717 if (ret < 0)
718 return ret;
719
720 ret = dev_pm_domain_attach(_dev, true);
721 if (ret)
722 goto out;
723
724 if (drv->probe) {
725 ret = drv->probe(dev);
726 if (ret)
727 dev_pm_domain_detach(_dev, true);
728 }
729
730 out:
731 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
732 dev_warn(_dev, "probe deferral not supported\n");
733 ret = -ENXIO;
734 }
735
736 return ret;
737 }
738
739 static int platform_drv_probe_fail(struct device *_dev)
740 {
741 return -ENXIO;
742 }
743
744 static int platform_drv_remove(struct device *_dev)
745 {
746 struct platform_driver *drv = to_platform_driver(_dev->driver);
747 struct platform_device *dev = to_platform_device(_dev);
748 int ret = 0;
749
750 if (drv->remove)
751 ret = drv->remove(dev);
752 dev_pm_domain_detach(_dev, true);
753
754 return ret;
755 }
756
757 static void platform_drv_shutdown(struct device *_dev)
758 {
759 struct platform_driver *drv = to_platform_driver(_dev->driver);
760 struct platform_device *dev = to_platform_device(_dev);
761
762 if (drv->shutdown)
763 drv->shutdown(dev);
764 }
765
766 /**
767 * __platform_driver_register - register a driver for platform-level devices
768 * @drv: platform driver structure
769 * @owner: owning module/driver
770 */
771 int __platform_driver_register(struct platform_driver *drv,
772 struct module *owner)
773 {
774 drv->driver.owner = owner;
775 drv->driver.bus = &platform_bus_type;
776 drv->driver.probe = platform_drv_probe;
777 drv->driver.remove = platform_drv_remove;
778 drv->driver.shutdown = platform_drv_shutdown;
779
780 return driver_register(&drv->driver);
781 }
782 EXPORT_SYMBOL_GPL(__platform_driver_register);
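
/*
 * Usage sketch (hypothetical driver): drivers normally do not call this
 * directly; they fill in a struct platform_driver and use the
 * platform_driver_register() / module_platform_driver() wrappers, which pass
 * THIS_MODULE as @owner.  The foo_* names and the compatible string are
 * placeholders.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */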
783
784 /**
785 * platform_driver_unregister - unregister a driver for platform-level devices
786 * @drv: platform driver structure
787 */
788 void platform_driver_unregister(struct platform_driver *drv)
789 {
790 driver_unregister(&drv->driver);
791 }
792 EXPORT_SYMBOL_GPL(platform_driver_unregister);
793
794 /**
795 * __platform_driver_probe - register driver for non-hotpluggable device
796 * @drv: platform driver structure
797 * @probe: the driver probe routine, probably from an __init section
798 * @module: module which will be the owner of the driver
799 *
800 * Use this instead of platform_driver_register() when you know the device
801 * is not hotpluggable and has already been registered, and you want to
802 * remove its run-once probe() infrastructure from memory after the driver
803 * has bound to the device.
804 *
805 * One typical use for this would be with drivers for controllers integrated
806 * into system-on-chip processors, where the controller devices have been
807 * configured as part of board setup.
808 *
809 * Note that this is incompatible with deferred probing.
810 *
811 * Returns zero if the driver registered and bound to a device, else returns
812 * a negative error code and with the driver not registered.
813 */
814 int __init_or_module __platform_driver_probe(struct platform_driver *drv,
815 int (*probe)(struct platform_device *), struct module *module)
816 {
817 int retval, code;
818
819 if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
820 pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
821 drv->driver.name, __func__);
822 return -EINVAL;
823 }
824
825 /*
826 * We have to run our probes synchronously because we check if
827 * we find any devices to bind to and exit with error if there
828 * are any.
829 */
830 drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
831
832 /*
833 * Prevent driver from requesting probe deferral to avoid further
834 * futile probe attempts.
835 */
836 drv->prevent_deferred_probe = true;
837
838 /* make sure driver won't have bind/unbind attributes */
839 drv->driver.suppress_bind_attrs = true;
840
841 /* temporary section violation during probe() */
842 drv->probe = probe;
843 retval = code = __platform_driver_register(drv, module);
844
845 /*
846 * Fixup that section violation, being paranoid about code scanning
847 * the list of drivers in order to probe new devices. Check to see
848 * if the probe was successful, and make sure any forced probes of
849 * new devices fail.
850 */
851 spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
852 drv->probe = NULL;
853 if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
854 retval = -ENODEV;
855 drv->driver.probe = platform_drv_probe_fail;
856 spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
857
858 if (code != retval)
859 platform_driver_unregister(drv);
860 return retval;
861 }
862 EXPORT_SYMBOL_GPL(__platform_driver_probe);
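
/*
 * Usage sketch (hypothetical legacy driver): the probe routine is kept in
 * __init and passed separately so that its text can be discarded once the
 * (already registered, non-hotpluggable) device has been bound.
 *
 *	static int __init foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *	}
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_driver_probe(&foo_driver, foo_probe);
 *	}
 */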
863
864 /**
865 * __platform_create_bundle - register driver and create corresponding device
866 * @driver: platform driver structure
867 * @probe: the driver probe routine, probably from an __init section
868 * @res: set of resources that needs to be allocated for the device
869 * @n_res: number of resources
870 * @data: platform specific data for this platform device
871 * @size: size of platform specific data
872 * @module: module which will be the owner of the driver
873 *
874 * Use this in legacy-style modules that probe hardware directly and
875 * register a single platform device and corresponding platform driver.
876 *
877 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
878 */
879 struct platform_device * __init_or_module __platform_create_bundle(
880 struct platform_driver *driver,
881 int (*probe)(struct platform_device *),
882 struct resource *res, unsigned int n_res,
883 const void *data, size_t size, struct module *module)
884 {
885 struct platform_device *pdev;
886 int error;
887
888 pdev = platform_device_alloc(driver->driver.name, -1);
889 if (!pdev) {
890 error = -ENOMEM;
891 goto err_out;
892 }
893
894 error = platform_device_add_resources(pdev, res, n_res);
895 if (error)
896 goto err_pdev_put;
897
898 error = platform_device_add_data(pdev, data, size);
899 if (error)
900 goto err_pdev_put;
901
902 error = platform_device_add(pdev);
903 if (error)
904 goto err_pdev_put;
905
906 error = __platform_driver_probe(driver, probe, module);
907 if (error)
908 goto err_pdev_del;
909
910 return pdev;
911
912 err_pdev_del:
913 platform_device_del(pdev);
914 err_pdev_put:
915 platform_device_put(pdev);
916 err_out:
917 return ERR_PTR(error);
918 }
919 EXPORT_SYMBOL_GPL(__platform_create_bundle);
920
921 /**
922 * __platform_register_drivers - register an array of platform drivers
923 * @drivers: an array of drivers to register
924 * @count: the number of drivers to register
925 * @owner: module owning the drivers
926 *
927 * Registers platform drivers specified by an array. On failure to register a
928 * driver, all previously registered drivers will be unregistered. Callers of
929 * this API should use platform_unregister_drivers() to unregister drivers in
930 * the reverse order.
931 *
932 * Returns: 0 on success or a negative error code on failure.
933 */
934 int __platform_register_drivers(struct platform_driver * const *drivers,
935 unsigned int count, struct module *owner)
936 {
937 unsigned int i;
938 int err;
939
940 for (i = 0; i < count; i++) {
941 pr_debug("registering platform driver %ps\n", drivers[i]);
942
943 err = __platform_driver_register(drivers[i], owner);
944 if (err < 0) {
945 pr_err("failed to register platform driver %ps: %d\n",
946 drivers[i], err);
947 goto error;
948 }
949 }
950
951 return 0;
952
953 error:
954 while (i--) {
955 pr_debug("unregistering platform driver %ps\n", drivers[i]);
956 platform_driver_unregister(drivers[i]);
957 }
958
959 return err;
960 }
961 EXPORT_SYMBOL_GPL(__platform_register_drivers);
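
/*
 * Usage sketch (hypothetical subsystem glue): several related drivers can be
 * registered and torn down together through the platform_register_drivers()
 * / platform_unregister_drivers() wrappers; foo_driver and bar_driver are
 * placeholders.
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_driver,
 *		&bar_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		platform_unregister_drivers(foo_drivers,
 *					    ARRAY_SIZE(foo_drivers));
 *	}
 */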
962
963 /**
964 * platform_unregister_drivers - unregister an array of platform drivers
965 * @drivers: an array of drivers to unregister
966 * @count: the number of drivers to unregister
967 *
968  * Unregisters platform drivers specified by an array. This is typically used
969 * to complement an earlier call to platform_register_drivers(). Drivers are
970 * unregistered in the reverse order in which they were registered.
971 */
972 void platform_unregister_drivers(struct platform_driver * const *drivers,
973 unsigned int count)
974 {
975 while (count--) {
976 pr_debug("unregistering platform driver %ps\n", drivers[count]);
977 platform_driver_unregister(drivers[count]);
978 }
979 }
980 EXPORT_SYMBOL_GPL(platform_unregister_drivers);
981
982 /* modalias support enables more hands-off userspace setup:
983 * (a) environment variable lets new-style hotplug events work once system is
984 * fully running: "modprobe $MODALIAS"
985 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
986 * mishandled before system is fully running: "modprobe $(cat modalias)"
987 */
988 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
989 char *buf)
990 {
991 struct platform_device *pdev = to_platform_device(dev);
992 int len;
993
994 len = of_device_modalias(dev, buf, PAGE_SIZE);
995 if (len != -ENODEV)
996 return len;
997
998 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
999 if (len != -ENODEV)
1000 return len;
1001
1002 len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
1003
1004 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
1005 }
1006 static DEVICE_ATTR_RO(modalias);
1007
1008 static ssize_t driver_override_store(struct device *dev,
1009 struct device_attribute *attr,
1010 const char *buf, size_t count)
1011 {
1012 struct platform_device *pdev = to_platform_device(dev);
1013 char *driver_override, *old, *cp;
1014
1015 /* We need to keep extra room for a newline */
1016 if (count >= (PAGE_SIZE - 1))
1017 return -EINVAL;
1018
1019 driver_override = kstrndup(buf, count, GFP_KERNEL);
1020 if (!driver_override)
1021 return -ENOMEM;
1022
1023 cp = strchr(driver_override, '\n');
1024 if (cp)
1025 *cp = '\0';
1026
1027 device_lock(dev);
1028 old = pdev->driver_override;
1029 if (strlen(driver_override)) {
1030 pdev->driver_override = driver_override;
1031 } else {
1032 kfree(driver_override);
1033 pdev->driver_override = NULL;
1034 }
1035 device_unlock(dev);
1036
1037 kfree(old);
1038
1039 return count;
1040 }
1041
1042 static ssize_t driver_override_show(struct device *dev,
1043 struct device_attribute *attr, char *buf)
1044 {
1045 struct platform_device *pdev = to_platform_device(dev);
1046 ssize_t len;
1047
1048 device_lock(dev);
1049 len = sprintf(buf, "%s\n", pdev->driver_override);
1050 device_unlock(dev);
1051 return len;
1052 }
1053 static DEVICE_ATTR_RW(driver_override);
1054
1055
1056 static struct attribute *platform_dev_attrs[] = {
1057 &dev_attr_modalias.attr,
1058 &dev_attr_driver_override.attr,
1059 NULL,
1060 };
1061 ATTRIBUTE_GROUPS(platform_dev);
1062
1063 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1064 {
1065 struct platform_device *pdev = to_platform_device(dev);
1066 int rc;
1067
1068 /* Some devices have extra OF data and an OF-style MODALIAS */
1069 rc = of_device_uevent_modalias(dev, env);
1070 if (rc != -ENODEV)
1071 return rc;
1072
1073 rc = acpi_device_uevent_modalias(dev, env);
1074 if (rc != -ENODEV)
1075 return rc;
1076
1077 add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1078 pdev->name);
1079 return 0;
1080 }
1081
1082 static const struct platform_device_id *platform_match_id(
1083 const struct platform_device_id *id,
1084 struct platform_device *pdev)
1085 {
1086 while (id->name[0]) {
1087 if (strcmp(pdev->name, id->name) == 0) {
1088 pdev->id_entry = id;
1089 return id;
1090 }
1091 id++;
1092 }
1093 return NULL;
1094 }
1095
1096 /**
1097 * platform_match - bind platform device to platform driver.
1098 * @dev: device.
1099 * @drv: driver.
1100 *
1101 * Platform device IDs are assumed to be encoded like this:
1102 * "<name><instance>", where <name> is a short description of the type of
1103 * device, like "pci" or "floppy", and <instance> is the enumerated
1104 * instance of the device, like '0' or '42'. Driver IDs are simply
1105 * "<name>". So, extract the <name> from the platform_device structure,
1106 * and compare it against the name of the driver. Return whether they match
1107 * or not.
1108 */
1109 static int platform_match(struct device *dev, struct device_driver *drv)
1110 {
1111 struct platform_device *pdev = to_platform_device(dev);
1112 struct platform_driver *pdrv = to_platform_driver(drv);
1113
1114 /* When driver_override is set, only bind to the matching driver */
1115 if (pdev->driver_override)
1116 return !strcmp(pdev->driver_override, drv->name);
1117
1118 /* Attempt an OF style match first */
1119 if (of_driver_match_device(dev, drv))
1120 return 1;
1121
1122 /* Then try ACPI style match */
1123 if (acpi_driver_match_device(dev, drv))
1124 return 1;
1125
1126 /* Then try to match against the id table */
1127 if (pdrv->id_table)
1128 return platform_match_id(pdrv->id_table, pdev) != NULL;
1129
1130 /* fall-back to driver name match */
1131 return (strcmp(pdev->name, drv->name) == 0);
1132 }
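
/*
 * Usage sketch (hypothetical driver): a driver that matches by name can also
 * supply an id table, which doubles as the source for module autoloading via
 * MODULE_DEVICE_TABLE(); the foo_* names are placeholders.
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ .name = "foo" },
 *		{ .name = "foo-v2" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe		= foo_probe,
 *		.id_table	= foo_id_table,
 *		.driver		= {
 *			.name = "foo",
 *		},
 *	};
 */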
1133
1134 #ifdef CONFIG_PM_SLEEP
1135
1136 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1137 {
1138 struct platform_driver *pdrv = to_platform_driver(dev->driver);
1139 struct platform_device *pdev = to_platform_device(dev);
1140 int ret = 0;
1141
1142 if (dev->driver && pdrv->suspend)
1143 ret = pdrv->suspend(pdev, mesg);
1144
1145 return ret;
1146 }
1147
1148 static int platform_legacy_resume(struct device *dev)
1149 {
1150 struct platform_driver *pdrv = to_platform_driver(dev->driver);
1151 struct platform_device *pdev = to_platform_device(dev);
1152 int ret = 0;
1153
1154 if (dev->driver && pdrv->resume)
1155 ret = pdrv->resume(pdev);
1156
1157 return ret;
1158 }
1159
1160 #endif /* CONFIG_PM_SLEEP */
1161
1162 #ifdef CONFIG_SUSPEND
1163
1164 int platform_pm_suspend(struct device *dev)
1165 {
1166 struct device_driver *drv = dev->driver;
1167 int ret = 0;
1168
1169 if (!drv)
1170 return 0;
1171
1172 if (drv->pm) {
1173 if (drv->pm->suspend)
1174 ret = drv->pm->suspend(dev);
1175 } else {
1176 ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1177 }
1178
1179 return ret;
1180 }
1181
1182 int platform_pm_resume(struct device *dev)
1183 {
1184 struct device_driver *drv = dev->driver;
1185 int ret = 0;
1186
1187 if (!drv)
1188 return 0;
1189
1190 if (drv->pm) {
1191 if (drv->pm->resume)
1192 ret = drv->pm->resume(dev);
1193 } else {
1194 ret = platform_legacy_resume(dev);
1195 }
1196
1197 return ret;
1198 }
1199
1200 #endif /* CONFIG_SUSPEND */
1201
1202 #ifdef CONFIG_HIBERNATE_CALLBACKS
1203
1204 int platform_pm_freeze(struct device *dev)
1205 {
1206 struct device_driver *drv = dev->driver;
1207 int ret = 0;
1208
1209 if (!drv)
1210 return 0;
1211
1212 if (drv->pm) {
1213 if (drv->pm->freeze)
1214 ret = drv->pm->freeze(dev);
1215 } else {
1216 ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1217 }
1218
1219 return ret;
1220 }
1221
1222 int platform_pm_thaw(struct device *dev)
1223 {
1224 struct device_driver *drv = dev->driver;
1225 int ret = 0;
1226
1227 if (!drv)
1228 return 0;
1229
1230 if (drv->pm) {
1231 if (drv->pm->thaw)
1232 ret = drv->pm->thaw(dev);
1233 } else {
1234 ret = platform_legacy_resume(dev);
1235 }
1236
1237 return ret;
1238 }
1239
1240 int platform_pm_poweroff(struct device *dev)
1241 {
1242 struct device_driver *drv = dev->driver;
1243 int ret = 0;
1244
1245 if (!drv)
1246 return 0;
1247
1248 if (drv->pm) {
1249 if (drv->pm->poweroff)
1250 ret = drv->pm->poweroff(dev);
1251 } else {
1252 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1253 }
1254
1255 return ret;
1256 }
1257
1258 int platform_pm_restore(struct device *dev)
1259 {
1260 struct device_driver *drv = dev->driver;
1261 int ret = 0;
1262
1263 if (!drv)
1264 return 0;
1265
1266 if (drv->pm) {
1267 if (drv->pm->restore)
1268 ret = drv->pm->restore(dev);
1269 } else {
1270 ret = platform_legacy_resume(dev);
1271 }
1272
1273 return ret;
1274 }
1275
1276 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1277
1278 int platform_dma_configure(struct device *dev)
1279 {
1280 enum dev_dma_attr attr;
1281 int ret = 0;
1282
1283 if (dev->of_node) {
1284 ret = of_dma_configure(dev, dev->of_node, true);
1285 } else if (has_acpi_companion(dev)) {
1286 attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1287 ret = acpi_dma_configure(dev, attr);
1288 }
1289
1290 return ret;
1291 }
1292
1293 static const struct dev_pm_ops platform_dev_pm_ops = {
1294 .runtime_suspend = pm_generic_runtime_suspend,
1295 .runtime_resume = pm_generic_runtime_resume,
1296 USE_PLATFORM_PM_SLEEP_OPS
1297 };
1298
1299 struct bus_type platform_bus_type = {
1300 .name = "platform",
1301 .dev_groups = platform_dev_groups,
1302 .match = platform_match,
1303 .uevent = platform_uevent,
1304 .dma_configure = platform_dma_configure,
1305 .pm = &platform_dev_pm_ops,
1306 };
1307 EXPORT_SYMBOL_GPL(platform_bus_type);
1308
1309 static inline int __platform_match(struct device *dev, const void *drv)
1310 {
1311 return platform_match(dev, (struct device_driver *)drv);
1312 }
1313
1314 /**
1315 * platform_find_device_by_driver - Find a platform device with a given
1316 * driver.
1317 * @start: The device to start the search from.
1318 * @drv: The device driver to look for.
1319 */
1320 struct device *platform_find_device_by_driver(struct device *start,
1321 const struct device_driver *drv)
1322 {
1323 return bus_find_device(&platform_bus_type, start, drv,
1324 __platform_match);
1325 }
1326 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1327
1328 int __init platform_bus_init(void)
1329 {
1330 int error;
1331
1332 error = device_register(&platform_bus);
1333 if (error) {
1334 put_device(&platform_bus);
1335 return error;
1336 }
1337 error = bus_register(&platform_bus_type);
1338 if (error)
1339 device_unregister(&platform_bus);
1340 of_platform_register_reconfig_notifier();
1341 return error;
1342 }