/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/firmware.h>
#include <asm/abs_addr.h>
#include <asm/hvcall.h>
static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};
/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;
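/*
 * Invariant (see vio_cmo_bus_init() and vio_cmo_balance() below): the
 * reserve and excess pools partition the total system entitlement,
 * i.e. vio_cmo.reserve.size + vio_cmo.excess.size == vio_cmo.entitled,
 * and the spare allocation is carried inside the reserve pool.
 */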
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices. The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
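/*
 * vio_cmo_alloc() and vio_cmo_dealloc() are the accounting hooks for the
 * vio_dma_mapping_ops wrappers defined below: every DMA mapping or
 * coherent allocation on a CMO device is charged against entitlement
 * here before the underlying dma_iommu_ops call is made.
 */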
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool. Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver. The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	dma_addr_t ret = DMA_ERROR_CODE;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	int ret, count = 0;
	size_t alloc_size = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return 0;
	}

	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

	if (unlikely(!ret)) {
		vio_cmo_dealloc(viodev, alloc_size);
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

	return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

	vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
	return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = vio_dma_iommu_dma_supported,
	.get_required_mask = vio_dma_get_required_mask,
};
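/*
 * Two details worth noting in the wrappers above: coherent allocations
 * are charged against entitlement in PAGE_SIZE units while page and
 * scatterlist mappings are charged in IOMMU_PAGE_SIZE units, and every
 * failed charge or failed mapping bumps the cmo.allocs_failed counter
 * that is exported through sysfs below.
 */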
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs. The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement, move
		 * any reserve memory in the change region to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
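/*
 * A sketch of the sysfs path (an assumption based on the standard bus
 * device layout): writing a byte count to
 * /sys/bus/vio/devices/<unit>/cmo_desired reaches this function through
 * viodev_cmo_desired_set() below.
 */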
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, tmp);
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
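/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a CMO-aware driver advertises its IO memory needs by implementing the
 * get_desired_dma() callback consulted by vio_cmo_bus_probe() above:
 *
 *	static unsigned long example_get_desired_dma(struct vio_dev *vdev)
 *	{
 *		// e.g. enough entitlement to map two 64KB buffers
 *		return 2 * 65536;
 *	}
 *
 * and sets .get_desired_dma = example_get_desired_dma in its
 * struct vio_driver.
 */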
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list. The minimum entitlement
 * will be reserved for the device as long as it is in the system. The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed. Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
					struct device_attribute *attr,   \
					char *buf)                       \
{                                                                        \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);         \
}
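/*
 * For example, viodev_cmo_rd_attr(desired) expands to a
 * viodev_cmo_desired_show() routine returning
 * sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.desired).
 */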
static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	unsigned long new_desired;
	int ret;

	ret = strict_strtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);

	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute vio_cmo_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_desired_show, viodev_cmo_desired_set),
	__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
	__ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
	__ATTR_NULL
};
/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                        \
	return sprintf(buf, "%lu\n", vio_cmo.name);                      \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                        \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                  \
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
				     size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);
static struct bus_attribute vio_cmo_bus_attrs[] = {
	__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
	__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
	__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
	__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
	__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
	__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
	__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
	__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
	__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viobus_cmo_high_show, viobus_cmo_high_reset),
	__ATTR_NULL
};
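/*
 * These attributes surface the bus-wide accounting as sysfs files
 * (cmo_entitled, cmo_reserve_size, cmo_excess_size, ...); cmo_high is
 * also writable, and any write resets the high water mark to the
 * current allocation via viobus_cmo_high_reset() above.
 */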
static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
	vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev - Pointer to a struct vio_dev for device
 * @op - Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op. In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs. The timeout places a limit on when to
 * stop re-submitting an operation; the total time can be exceeded if an
 * operation is in progress.
 *
 * op->hcall_err will be set to the return from the last h_cop_op call,
 * or to 0 if an error not involving the h_call was encountered.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if a timeout is specified and has elapsed,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(deadline, jiffies)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
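/*
 * Illustrative sketch (hypothetical values; the field names are those
 * passed to the hcall above): a PFO driver submits a synchronous
 * co-processor request roughly as
 *
 *	struct vio_pfo_op op = {
 *		.flags   = 0,
 *		.in      = in_paddr,  .inlen  = in_len,
 *		.out     = out_paddr, .outlen = out_len,
 *		.csbcpb  = csbcpb_paddr,
 *		.timeout = 2000,	// ms; 0 means retry indefinitely
 *	};
 *	int rc = vio_h_cop_sync(vdev, &op);
 */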
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const unsigned char *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	return iommu_init_table(tbl, -1);
}
/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}
/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
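/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * minimal client registers against the bus with an id table and
 * probe/remove callbacks, normally through the vio_register_driver()
 * wrapper that supplies THIS_MODULE and KBUILD_MODNAME to
 * __vio_register_driver():
 *
 *	static struct vio_device_id example_ids[] = {
 *		{ "network", "IBM,example" },
 *		{ "", "" },
 *	};
 *
 *	static struct vio_driver example_driver = {
 *		.id_table = example_ids,
 *		.probe    = example_probe,
 *		.remove   = example_remove,
 *		.name     = "example",
 *	};
 *
 *	// in module init: vio_register_driver(&example_driver);
 */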
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_free_table(tbl, dev->of_node ?
			dev->of_node->full_name : dev_name(dev));
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}
/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const unsigned int *unit_address;
	const unsigned int *pfo_resid = NULL;
	enum vio_dev_family family;
	const char *of_node_name = of_node->name ? of_node->name : "<unknown>";

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node. This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
			family = PFO;
		else if (!strcmp(parent_node->full_name, "/vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%s) of %s not recognized.\n",
					__func__,
					parent_node->full_name,
					of_node_name);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %s.\n",
				__func__, of_node_name);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %s.\n",
					__func__, of_node_name);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		if (of_node->type != NULL)
			viodev->type = of_node->type;
		else {
			pr_warn("%s: node %s is missing the 'device_type' "
					"property.\n", __func__, of_node_name);
			goto out;
		}

		unit_address = of_get_property(of_node, "reg", NULL);
		if (unit_address == NULL) {
			pr_warn("%s: node %s missing 'reg'\n",
					__func__, of_node_name);
			goto out;
		}
		dev_set_name(&viodev->dev, "%x", *unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = *unit_address;
	} else {
		/* PFO devices need their resource_id for submitting COP_OPs
		 * This is an optional field for devices, but is required when
		 * performing synchronous ops */
		pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
		if (pfo_resid != NULL)
			viodev->resource_id = *pfo_resid;

		unit_address = NULL;
		dev_set_name(&viodev->dev, "%s", of_node_name);
		viodev->type = of_node_name;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case the driver doesn't set it explicitly */
		dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
		dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);
	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/**
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name - OF node name for the root of the subtree to search.
 *		This must be non-NULL
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {
		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree. Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}
/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
		       __func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
postcore_initcall(vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
device_initcall(vio_device_init);
static ssize_t name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}

static ssize_t devspec_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}

static struct device_attribute vio_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR_NULL
};
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}
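/*
 * The resulting modalias has the form "vio:T<type>S<compat>"; e.g. a
 * hypothetical network device whose node is compatible with "IBM,l-lan"
 * would produce MODALIAS=vio:TnetworkSIBM,l-lan.
 */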
static struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_attrs = vio_dev_attrs,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};
/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
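/*
 * Illustrative use (the property name is a typical example, not required
 * by this API): a driver can fetch an OF property of its device node with
 *
 *	const unsigned int *mac;
 *	mac = vio_get_attribute(vdev, "local-mac-address", NULL);
 */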
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}
/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	const uint32_t *unit_address;
	char kobj_name[20];
	struct device_node *vnode_parent;
	const char *dev_type;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	dev_type = of_get_property(vnode_parent, "device_type", NULL);
	of_node_put(vnode_parent);
	if (!dev_type)
		return NULL;

	/* construct the kobject name from the device node */
	if (!strcmp(dev_type, "vdevice")) {
		unit_address = of_get_property(vnode, "reg", NULL);
		if (!unit_address)
			return NULL;
		snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
	} else if (!strcmp(dev_type, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
	else
		return NULL;

	return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */