// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Robert Jennings <rcjenn@us.ibm.com>
 */
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/vio.h>	/* struct vio_dev, vio_bus_type, vio_driver */
#include <asm/prom.h>	/* OF helpers used throughout */
#include <asm/tce.h>	/* TCE_VB */
#include <asm/firmware.h>
#include <asm/hvcall.h>
static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};
/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
static struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool can not be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}
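/*
 * Worked example of the allocation accounting above (illustrative only):
 * a device with cmo.entitled = 64KB and cmo.allocated = 48KB has
 * reserve_free = 16KB.  A 24KB request first consumes that 16KB of
 * per-device reserve, and the remaining 8KB is drawn from
 * vio_cmo.excess.free, provided the spare pool is fully funded at
 * VIO_CMO_MIN_ENT; otherwise the excess pool is off limits and the
 * request fails with -ENOMEM.
 */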
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool. The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min3(spare_needed, reserve_freed,
                           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
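/*
 * Illustration of the deallocation ordering above: if the spare pool is
 * 4KB short of VIO_CMO_MIN_ENT and a device frees 16KB, of which 6KB had
 * been satisfied from the excess pool, the 4KB spare deficit is repaid
 * from that 6KB first; the remaining excess-freed memory then grows the
 * reserve pool toward vio_cmo.desired before anything is returned to
 * excess.free, and the reserve-freed portion reduces the device's
 * entitlement (never below VIO_CMO_MIN_ENT).
 */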
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;
                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag,
                                          unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                   dma_handle, dev->coherent_dma_mask, flag,
                                   dev_to_node(dev));
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
                                        unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl = get_iommu_table_base(dev);
        dma_addr_t ret = DMA_MAPPING_ERROR;

        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
                goto out_fail;
        ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev),
                             direction, attrs);
        if (unlikely(ret == DMA_MAPPING_ERROR))
                goto out_deallocate;
        return ret;

out_deallocate:
        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
        atomic_inc(&viodev->cmo.allocs_failed);
        return DMA_MAPPING_ERROR;
}
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl = get_iommu_table_base(dev);

        iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl = get_iommu_table_base(dev);
        struct scatterlist *sgl;
        int ret, count;
        size_t alloc_size = 0;

        for_each_sg(sglist, sgl, nelems, count)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

        if (vio_cmo_alloc(viodev, alloc_size))
                goto out_fail;
        ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev),
                               direction, attrs);
        if (unlikely(!ret))
                goto out_deallocate;

        for_each_sg(sglist, sgl, ret, count)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;

out_deallocate:
        vio_cmo_dealloc(viodev, alloc_size);
out_fail:
        atomic_inc(&viodev->cmo.allocs_failed);
        return 0;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   unsigned long attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl = get_iommu_table_base(dev);
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count;

        for_each_sg(sglist, sgl, nelems, count)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

        ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
        vio_cmo_dealloc(viodev, alloc_size);
}
static const struct dma_map_ops vio_dma_mapping_ops = {
        .alloc             = vio_dma_iommu_alloc_coherent,
        .free              = vio_dma_iommu_free_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,
        .unmap_page        = vio_dma_iommu_unmap_page,
        .dma_supported     = dma_iommu_dma_supported,
        .get_required_mask = dma_iommu_get_required_mask,
};
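/*
 * These ops wrap the generic dma_iommu_* path with CMO entitlement
 * accounting.  They are installed per device by vio_cmo_set_dma_ops()
 * below when the CMO firmware feature is active; a non-CMO system keeps
 * plain dma_iommu_ops (see vio_register_device_node()).  A driver calling
 * dma_map_single() on a vio device therefore lands in
 * vio_dma_iommu_map_page() above, which can fail with DMA_MAPPING_ERROR
 * once the device's entitlement is exhausted.
 */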
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it can not receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any reserve memory in the change region to the excess
                 * pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
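/*
 * Illustrative use (a sketch, not taken from this file): a driver that is
 * about to open many connections might raise its desired entitlement,
 * assuming a hypothetical per-adapter structure holding its vio_dev:
 *
 *	vio_cmo_set_dev_desired(adapter->vdev, 1024 * 1024);
 *
 * The same request can come from user space through the cmo_desired sysfs
 * attribute defined below; either way, the change only takes effect after
 * the scheduled vio_cmo_balance() run.
 */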
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct iommu_table *tbl;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;
        bool dma_capable = false;

        tbl = get_iommu_table_base(dev);

        /* A device requires entitlement if it has a DMA window property */
        switch (viodev->family) {
        case VDEVICE:
                if (of_get_property(viodev->dev.of_node,
                                    "ibm,my-dma-window", NULL))
                        dma_capable = true;
                break;
        case PFO:
                dma_capable = false;
                break;
        default:
                dev_warn(dev, "unknown device family: %d\n", viodev->family);
                BUG();
                break;
        }

        /* Configure entitlement for the device. */
        if (dma_capable) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired =
                        IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Updated desired entitlement if device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this will guarantee that
                 * a module unload/load will result in a success.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t cmo_##name##_show(struct device *dev,                    \
                                 struct device_attribute *attr,         \
                                 char *buf)                             \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}
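/*
 * For illustration: viodev_cmo_rd_attr(entitled) below expands to a show
 * routine equivalent to the following sketch, which DEVICE_ATTR_RO then
 * wires up as the per-device sysfs file "cmo_entitled":
 *
 *	static ssize_t cmo_entitled_show(struct device *dev,
 *					 struct device_attribute *attr,
 *					 char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.entitled);
 *	}
 */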
static ssize_t cmo_allocs_failed_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t cmo_desired_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = kstrtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_devspec.attr,
        &dev_attr_modalias.attr,
        &dev_attr_cmo_entitled.attr,
        &dev_attr_cmo_allocated.attr,
        &dev_attr_cmo_desired.attr,
        &dev_attr_cmo_allocs_failed.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)    \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static struct bus_attribute bus_attr_cmo_bus_##name =                   \
        __ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
        return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
                              size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
        &bus_attr_cmo_bus_entitled.attr,
        &bus_attr_cmo_bus_spare.attr,
        &bus_attr_cmo_bus_min.attr,
        &bus_attr_cmo_bus_desired.attr,
        &bus_attr_cmo_bus_curr.attr,
        &bus_attr_cmo_high.attr,
        &bus_attr_cmo_reserve_size.attr,
        &bus_attr_cmo_excess_size.attr,
        &bus_attr_cmo_excess_free.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_groups = vio_cmo_dev_groups;
        vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: Pointer to a struct vio_dev for device
 * @op: Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs.  The timeout places a limit on when to
 * stop re-submitting an operation, so the total time can be exceeded if an
 * operation is in progress.
 *
 * op->hcall_err is set to the return from the last H_COP hcall, or to 0 if
 * an error not involving the hcall was encountered.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if a timeout is specified and has elapsed,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
        struct device *dev = &vdev->dev;
        unsigned long deadline = 0;
        long hret = 0;
        int ret = 0;

        if (op->timeout)
                deadline = jiffies + msecs_to_jiffies(op->timeout);

        while (true) {
                hret = plpar_hcall_norets(H_COP, op->flags,
                                vdev->resource_id,
                                op->in, op->inlen, op->out,
                                op->outlen, op->csbcpb);

                if (hret == H_SUCCESS ||
                    (hret != H_NOT_ENOUGH_RESOURCES &&
                     hret != H_BUSY && hret != H_RESOURCE) ||
                    (op->timeout && time_after(jiffies, deadline)))
                        break;

                dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
        }

        switch (hret) {
        case H_SUCCESS:
                ret = 0;
                break;
        case H_OP_MODE:
        case H_TOO_BIG:
                ret = -E2BIG;
                break;
        case H_RESCINDED:
                ret = -EACCES;
                break;
        case H_HARDWARE:
                ret = -EPERM;
                break;
        case H_NOT_ENOUGH_RESOURCES:
        case H_RESOURCE:
        case H_BUSY:
                ret = -EBUSY;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
                        __func__, ret, hret);

        op->hcall_err = hret;
        return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
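/*
 * Illustrative call sequence (a sketch under assumed buffer setup; the
 * csbcpb, in and out addresses must point at memory the co-processor can
 * reach, and the *_paddr/*_len names here are hypothetical):
 *
 *	struct vio_pfo_op op = {
 *		.flags   = 0,
 *		.in      = in_paddr,
 *		.inlen   = in_len,
 *		.out     = out_paddr,
 *		.outlen  = out_len,
 *		.csbcpb  = csbcpb_paddr,
 *		.timeout = 500,		// ms; 0 would retry indefinitely
 *	};
 *	int rc = vio_h_cop_sync(vdev, &op);
 *	// on failure, op.hcall_err holds the last hypervisor return code
 */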
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const __be32 *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        dma_window = of_get_property(dev->dev.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
        tbl->it_size = size >> tbl->it_page_shift;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> tbl->it_page_shift;
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;

        if (firmware_has_feature(FW_FEATURE_LPAR))
                tbl->it_ops = &iommu_table_lpar_multi_ops;
        else
                tbl->it_ops = &iommu_table_pseries_ops;

        return iommu_init_table(tbl, -1);
}
/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}
/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
                          const char *mod_name)
{
        pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

        /* fill in 'struct driver' fields */
        viodrv->driver.name = viodrv->name;
        viodrv->driver.pm = viodrv->pm;
        viodrv->driver.bus = &vio_bus_type;
        viodrv->driver.owner = owner;
        viodrv->driver.mod_name = mod_name;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
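/*
 * Minimal registration sketch (illustrative; the "foo" names are
 * hypothetical).  Drivers normally use the vio_register_driver() wrapper
 * declared in asm/vio.h, which supplies THIS_MODULE and KBUILD_MODNAME to
 * the function above:
 *
 *	static const struct vio_device_id foo_ids[] = {
 *		{ "block", "IBM,foo" },
 *		{ "", "" },
 *	};
 *
 *	static struct vio_driver foo_driver = {
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *		.name     = "foo",
 *	};
 *
 *	vio_register_driver(&foo_driver);
 */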
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (tbl)
                iommu_tce_table_put(tbl);
        of_node_put(dev->of_node);
        kfree(to_vio_dev(dev));
}
/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        struct device_node *parent_node;
        const __be32 *prop;
        enum vio_dev_family family;

        /*
         * Determine if this node is under the /vdevice node or under the
         * /ibm,platform-facilities node.  This decides the device's family.
         */
        parent_node = of_get_parent(of_node);
        if (parent_node) {
                if (of_node_is_type(parent_node, "ibm,platform-facilities"))
                        family = PFO;
                else if (of_node_is_type(parent_node, "vdevice"))
                        family = VDEVICE;
                else {
                        pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
                                        __func__,
                                        parent_node,
                                        of_node);
                        of_node_put(parent_node);
                        return NULL;
                }
                of_node_put(parent_node);
        } else {
                pr_warn("%s: could not determine the parent of node %pOFn.\n",
                                __func__, of_node);
                return NULL;
        }

        if (family == PFO) {
                if (of_get_property(of_node, "interrupt-controller", NULL)) {
                        pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
                                        __func__, of_node);
                        return NULL;
                }
        }

        /* allocate a vio_dev for this node */
        viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
        if (viodev == NULL) {
                pr_warn("%s: allocation failure for VIO device.\n", __func__);
                return NULL;
        }

        /* we need the 'device_type' property, in order to match with drivers */
        viodev->family = family;
        if (viodev->family == VDEVICE) {
                unsigned int unit_address;

                viodev->type = of_node_get_device_type(of_node);
                if (!viodev->type) {
                        pr_warn("%s: node %pOFn is missing the 'device_type' "
                                        "property.\n", __func__, of_node);
                        goto out;
                }

                prop = of_get_property(of_node, "reg", NULL);
                if (prop == NULL) {
                        pr_warn("%s: node %pOFn missing 'reg'\n",
                                        __func__, of_node);
                        goto out;
                }
                unit_address = of_read_number(prop, 1);
                dev_set_name(&viodev->dev, "%x", unit_address);
                viodev->irq = irq_of_parse_and_map(of_node, 0);
                viodev->unit_address = unit_address;
        } else {
                /* PFO devices need their resource_id for submitting COP_OPs
                 * This is an optional field for devices, but is required when
                 * performing synchronous ops */
                prop = of_get_property(of_node, "ibm,resource-id", NULL);
                if (prop != NULL)
                        viodev->resource_id = of_read_number(prop, 1);

                dev_set_name(&viodev->dev, "%pOFn", of_node);
                viodev->type = dev_name(&viodev->dev);
                viodev->irq = 0;
        }

        viodev->name = of_node->name;
        viodev->dev.of_node = of_node_get(of_node);

        set_dev_node(&viodev->dev, of_node_to_nid(of_node));

        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
        viodev->dev.bus = &vio_bus_type;
        viodev->dev.release = vio_dev_release;

        if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
                if (firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_set_dma_ops(viodev);
                else
                        set_dma_ops(&viodev->dev, &dma_iommu_ops);

                set_iommu_table_base(&viodev->dev,
                                     vio_build_iommu_table(viodev));

                /* needed to ensure proper operation of coherent allocations
                 * later, in case driver doesn't set it explicitly */
                viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
                viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
        }

        /* register with generic device framework */
        if (device_register(&viodev->dev)) {
                printk(KERN_ERR "%s: failed to register device %s\n",
                                __func__, dev_name(&viodev->dev));
                put_device(&viodev->dev);
                return NULL;
        }

        return viodev;

out:	/* Use this exit point for any return prior to device_register */
        kfree(viodev);
        return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/**
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name:	OF node name for the root of the subtree to search.
 *		This must be non-NULL.
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */
static void vio_bus_scan_register_devices(char *root_name)
{
        struct device_node *node_root, *node_child;

        if (!root_name)
                return;

        node_root = of_find_node_by_name(NULL, root_name);
        if (node_root) {
                /*
                 * Create struct vio_devices for each virtual device in
                 * the device tree.  Drivers will associate with them later.
                 */
                node_child = of_get_next_child(node_root, NULL);
                while (node_child) {
                        vio_register_device_node(node_child);
                        node_child = of_get_next_child(node_root, node_child);
                }
                of_node_put(node_root);
        }
}
/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
        int err;

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_sysfs_init();

        err = bus_register(&vio_bus_type);
        if (err) {
                printk(KERN_ERR "failed to register VIO bus\n");
                return err;
        }

        /*
         * The fake parent of all vio devices, just to give us
         * a nice directory
         */
        err = device_register(&vio_bus_device.dev);
        if (err) {
                printk(KERN_WARNING "%s: device_register returned %i\n",
                                __func__, err);
                return err;
        }

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_init();

        return 0;
}
postcore_initcall(vio_bus_init);
static int __init vio_device_init(void)
{
        vio_bus_scan_register_devices("vdevice");
        vio_bus_scan_register_devices("ibm,platform-facilities");

        return 0;
}
device_initcall(vio_device_init);
static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct device_node *of_node = dev->of_node;

        return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->of_node;
        if (!dn) {
                strcpy(buf, "\n");
                return strlen(buf);
        }
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp) {
                strcpy(buf, "\n");
                return strlen(buf);
        }

        return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *vio_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_devspec.attr,
        &dev_attr_modalias.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vio_dev);
void vio_unregister_device(struct vio_dev *viodev)
{
        device_unregister(&viodev->dev);
        if (viodev->family == VDEVICE)
                irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct vio_driver *vio_drv = to_vio_driver(drv);
        const struct vio_device_id *ids = vio_drv->id_table;

        return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->of_node;
        if (!dn)
                return -ENODEV;
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp)
                return -ENODEV;

        add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
        return 0;
}
struct bus_type vio_bus_type = {
        .name = "vio",
        .dev_groups = vio_dev_groups,
        .uevent = vio_hotplug,
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
};
/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
        return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
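/*
 * Illustrative use (a sketch; the property name here is hypothetical and
 * depends on the device):
 *
 *	int len;
 *	const __be32 *val = vio_get_attribute(vdev, "ibm,max-transfer", &len);
 *	if (val)
 *		;	// property exists; len holds its size in bytes
 *
 * The returned pointer aliases the OF property storage and must not be
 * freed by the caller.
 */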
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
        struct device *found;

        found = bus_find_device_by_name(&vio_bus_type, NULL, name);
        if (!found)
                return NULL;

        return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be dropped
 * after use.
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
        char kobj_name[20];
        struct device_node *vnode_parent;

        vnode_parent = of_get_parent(vnode);
        if (!vnode_parent)
                return NULL;

        /* construct the kobject name from the device node */
        if (of_node_is_type(vnode_parent, "vdevice")) {
                const __be32 *prop;

                prop = of_get_property(vnode, "reg", NULL);
                if (!prop)
                        goto out;
                snprintf(kobj_name, sizeof(kobj_name), "%x",
                         (uint32_t)of_read_number(prop, 1));
        } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
                snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
        else
                goto out;

        of_node_put(vnode_parent);
        return vio_find_name(kobj_name);
out:
        of_node_put(vnode_parent);
        return NULL;
}
EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */
static int __init vio_init(void)
{
        dma_debug_add_bus(&vio_bus_type);
        return 0;
}
fs_initcall(vio_init);