/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

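/*
 * Invariant (re-established by vio_cmo_balance() below): the pools are
 * sized so that reserve.size + excess.size == entitled, with the spare
 * entitlement accounted as part of the reserve pool.
 */
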
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
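
/*
 * Illustrative sketch of the expected calling pattern (the error label is
 * hypothetical): callers charge entitlement in whole IOMMU pages before
 * creating a mapping and release it again on failure or unmap, as the DMA
 * operations later in this file do:
 *
 *	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE)))
 *		goto out_failed;
 *	...perform the mapping...
 *	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 */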

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;

		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	dma_addr_t ret = DMA_ERROR_CODE;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	int ret, count = 0;
	size_t alloc_size = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return 0;
	}

	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

	if (unlikely(!ret)) {
		vio_cmo_dealloc(viodev, alloc_size);
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

	return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count = 0;

	for (sgl = sglist; count < nelems; count++, sgl++)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

	vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
	return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.mmap              = dma_direct_mmap_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = vio_dma_iommu_dma_supported,
	.get_required_mask = vio_dma_get_required_mask,
};

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement, move
		 * any reserve memory in the change region to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
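
/*
 * Note: besides driver calls, the change above can be driven from user
 * space through the per-device cmo_desired sysfs attribute (see
 * viodev_cmo_desired_set() later in this file).
 */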

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Update desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, tmp);
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
					struct device_attribute *attr,  \
					char *buf)                      \
{                                                                       \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}
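
/*
 * For example, viodev_cmo_rd_attr(desired) below expands to a
 * viodev_cmo_desired_show() routine, which backs the read side of the
 * cmo_desired device attribute.
 */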

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = strict_strtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(devspec),
	__ATTR_RO(modalias),
	__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_desired_show, viodev_cmo_desired_set),
	__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
	__ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
	__ATTR_NULL
};

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
				     size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
	__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
	__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
	__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
	__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
	__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
	__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
	__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
	__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
	__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
	       viobus_cmo_high_show, viobus_cmo_high_reset),
	__ATTR_NULL
};
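
/*
 * With the standard driver-core layout, and assuming the bus registers
 * under the name "vio", these attributes would surface as files such as
 * /sys/bus/vio/cmo_entitled and /sys/bus/vio/cmo_high (paths assumed
 * here, not taken from this file).
 */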

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
	vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);


/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev - Pointer to a struct vio_dev for device
 * @op - Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs.  The timeout places a limit on when to
 * stop re-submitting an operation, the total time can be exceeded if an
 * operation is in progress.
 *
 * On return, op->hcall_err holds the result of the last H_COP hcall, or 0
 * if an error not involving the hcall was encountered.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if a timeout is specified and has elapsed,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
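
/*
 * Illustrative sketch of a synchronous PFO submission; every value below
 * (the DMA addresses, lengths and the 2000 ms timeout) is a hypothetical
 * placeholder, only the field names match the structure used above:
 *
 *	struct vio_pfo_op op = {
 *		.flags   = 0,
 *		.in      = in_dma,
 *		.inlen   = in_len,
 *		.out     = out_dma,
 *		.outlen  = out_len,
 *		.csbcpb  = csbcpb_dma,
 *		.timeout = 2000,
 *	};
 *
 *	if (vio_h_cop_sync(vdev, &op))
 *		dev_err(&vdev->dev, "H_COP failed, hret %ld\n", op.hcall_err);
 */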

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const unsigned char *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	return iommu_init_table(tbl, -1);
}

/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
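
/*
 * Illustrative sketch of a client driver registering with this bus; the
 * "foo" names are hypothetical, and the vio_register_driver() wrapper is
 * assumed to be the macro provided by <asm/vio.h>:
 *
 *	static struct vio_device_id foo_device_table[] = {
 *		{ "network", "IBM,l-lan" },
 *		{ "", "" }
 *	};
 *	MODULE_DEVICE_TABLE(vio, foo_device_table);
 *
 *	static struct vio_driver foo_driver = {
 *		.id_table = foo_device_table,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *		.name     = "foo",
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return vio_register_driver(&foo_driver);
 *	}
 */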

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_free_table(tbl, of_node_full_name(dev->of_node));
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const unsigned int *unit_address;
	const unsigned int *pfo_resid = NULL;
	enum vio_dev_family family;
	const char *of_node_name = of_node->name ? of_node->name : "<unknown>";

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node.  This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
			family = PFO;
		else if (!strcmp(parent_node->full_name, "/vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%s) of %s not recognized.\n",
					__func__,
					parent_node->full_name,
					of_node_name);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %s.\n",
				__func__, of_node_name);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %s.\n",
					__func__, of_node_name);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		if (of_node->type != NULL)
			viodev->type = of_node->type;
		else {
			pr_warn("%s: node %s is missing the 'device_type' "
					"property.\n", __func__, of_node_name);
			goto out;
		}

		unit_address = of_get_property(of_node, "reg", NULL);
		if (unit_address == NULL) {
			pr_warn("%s: node %s missing 'reg'\n",
					__func__, of_node_name);
			goto out;
		}
		dev_set_name(&viodev->dev, "%x", *unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = *unit_address;
	} else {
		/* PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required when
		 * performing synchronous ops */
		pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
		if (pfo_resid != NULL)
			viodev->resource_id = *pfo_resid;

		unit_address = NULL;
		dev_set_name(&viodev->dev, "%s", of_node_name);
		viodev->type = of_node_name;
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case the driver doesn't set it explicitly */
		dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
		dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
1da177e4 | 1435 | |
f2ab6219 KY |
1436 | /* |
1437 | * vio_bus_scan_for_devices - Scan OF and register each child device | |
1438 | * @root_name - OF node name for the root of the subtree to search. | |
1439 | * This must be non-NULL | |
1440 | * | |
1441 | * Starting from the root node provide, register the device node for | |
1442 | * each child beneath the root. | |
1443 | */ | |
1444 | static void vio_bus_scan_register_devices(char *root_name) | |
1445 | { | |
1446 | struct device_node *node_root, *node_child; | |
1447 | ||
1448 | if (!root_name) | |
1449 | return; | |
1450 | ||
1451 | node_root = of_find_node_by_name(NULL, root_name); | |
1452 | if (node_root) { | |
1453 | ||
1454 | /* | |
1455 | * Create struct vio_devices for each virtual device in | |
1456 | * the device tree. Drivers will associate with them later. | |
1457 | */ | |
1458 | node_child = of_get_next_child(node_root, NULL); | |
1459 | while (node_child) { | |
1460 | vio_register_device_node(node_child); | |
1461 | node_child = of_get_next_child(node_root, node_child); | |
1462 | } | |
1463 | of_node_put(node_root); | |
1464 | } | |
1465 | } | |
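The open-coded of_get_next_child() walk above predates common use of the for_each_child_of_node() helper; a behaviorally equivalent sketch using the macro, which drops each child's reference as it advances:

	#include <linux/of.h>

	static void example_scan_register_devices(const char *root_name)
	{
		struct device_node *node_root, *node_child;

		node_root = of_find_node_by_name(NULL, root_name);
		if (!node_root)
			return;

		/* The iterator calls of_node_put() on each child in turn. */
		for_each_child_of_node(node_root, node_child)
			vio_register_device_node(node_child);

		of_node_put(node_root);
	}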
1466 | ||
1da177e4 LT |
1467 | /** |
1468 | * vio_bus_init - Initialize the virtual IO bus | |
1469 | */ | |
c7f0e8cb | 1470 | static int __init vio_bus_init(void) |
1da177e4 LT |
1471 | { |
1472 | int err; | |
1473 | ||
a90ab95a RJ |
1474 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1475 | vio_cmo_sysfs_init(); | |
1476 | ||
1da177e4 LT |
1477 | err = bus_register(&vio_bus_type); |
1478 | if (err) { | |
1479 | printk(KERN_ERR "failed to register VIO bus\n"); | |
1480 | return err; | |
1481 | } | |
1482 | ||
5c0b4b87 SR |
1483 | /* |
1484 | * The fake parent of all vio devices, just to give us | |
3e494c80 SR |
1485 | * a nice directory |
1486 | */ | |
ac5b33c9 | 1487 | err = device_register(&vio_bus_device.dev); |
1da177e4 | 1488 | if (err) { |
3e494c80 | 1489 | printk(KERN_WARNING "%s: device_register returned %i\n", |
e48b1b45 | 1490 | __func__, err); |
1da177e4 LT |
1491 | return err; |
1492 | } | |
1493 | ||
a90ab95a RJ |
1494 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1495 | vio_cmo_bus_init(); | |
1496 | ||
44b372d8 AB |
1497 | return 0; |
1498 | } | |
1499 | postcore_initcall(vio_bus_init); | |
1500 | ||
1501 | static int __init vio_device_init(void) | |
1502 | { | |
f2ab6219 KY |
1503 | vio_bus_scan_register_devices("vdevice"); |
1504 | vio_bus_scan_register_devices("ibm,platform-facilities"); | |
e10fa773 | 1505 | |
3e494c80 SR |
1506 | return 0; |
1507 | } | |
44b372d8 | 1508 | device_initcall(vio_device_init); |
1da177e4 | 1509 | |
e10fa773 | 1510 | static ssize_t name_show(struct device *dev, |
5c0b4b87 | 1511 | struct device_attribute *attr, char *buf) |
1da177e4 LT |
1512 | { |
1513 | return sprintf(buf, "%s\n", to_vio_dev(dev)->name); | |
1514 | } | |
e10fa773 SR |
1515 | |
1516 | static ssize_t devspec_show(struct device *dev, | |
1517 | struct device_attribute *attr, char *buf) | |
1518 | { | |
58f9b0b0 | 1519 | struct device_node *of_node = dev->of_node; |
e10fa773 | 1520 | |
74a7f084 | 1521 | return sprintf(buf, "%s\n", of_node_full_name(of_node)); |
e10fa773 SR |
1522 | } |
1523 | ||
578b7cd1 BH |
1524 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
1525 | char *buf) | |
1526 | { | |
1527 | const struct vio_dev *vio_dev = to_vio_dev(dev); | |
1528 | struct device_node *dn; | |
1529 | const char *cp; | |
1530 | ||
cf9b59e9 | 1531 | dn = dev->of_node; |
578b7cd1 BH |
1532 | if (!dn) |
1533 | return -ENODEV; | |
1534 | cp = of_get_property(dn, "compatible", NULL); | |
1535 | if (!cp) | |
1536 | return -ENODEV; | |
1537 | ||
1538 | return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); | |
1539 | } | |
1540 | ||
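For concreteness, an example of the string modalias_show() produces: a virtual ethernet node whose device_type is "network" and whose compatible string is "IBM,l-lan" (the pair matched by the ibmveth driver) would read back as:

	vio:TnetworkSIBM,l-lan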
e10fa773 SR |
1541 | static struct device_attribute vio_dev_attrs[] = { |
1542 | __ATTR_RO(name), | |
1543 | __ATTR_RO(devspec), | |
578b7cd1 | 1544 | __ATTR_RO(modalias), |
e10fa773 SR |
1545 | __ATTR_NULL |
1546 | }; | |
1da177e4 | 1547 | |
cad5cef6 | 1548 | void vio_unregister_device(struct vio_dev *viodev) |
1da177e4 | 1549 | { |
1da177e4 LT |
1550 | device_unregister(&viodev->dev); |
1551 | } | |
1552 | EXPORT_SYMBOL(vio_unregister_device); | |
1553 | ||
1da177e4 LT |
1554 | static int vio_bus_match(struct device *dev, struct device_driver *drv) |
1555 | { | |
1556 | const struct vio_dev *vio_dev = to_vio_dev(dev); | |
1557 | struct vio_driver *vio_drv = to_vio_driver(drv); | |
1558 | const struct vio_device_id *ids = vio_drv->id_table; | |
1da177e4 | 1559 | |
5c0b4b87 | 1560 | return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); |
1da177e4 LT |
1561 | } |
1562 | ||
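vio_match_device() walks the driver's id table, comparing each entry's type prefix and compatible string against the device. A sketch of the driver side; the table name is hypothetical, while the entry itself is the pairing the ibmveth driver uses:

	#include <linux/module.h>
	#include <asm/vio.h>

	static struct vio_device_id example_device_table[] = {
		{ "network", "IBM,l-lan" },	/* device_type, compatible */
		{ "", "" }			/* terminator */
	};
	MODULE_DEVICE_TABLE(vio, example_device_table);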
7eff2e7a | 1563 | static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) |
143dcec2 OH |
1564 | { |
1565 | const struct vio_dev *vio_dev = to_vio_dev(dev); | |
12d04eef | 1566 | struct device_node *dn; |
a7f67bdf | 1567 | const char *cp; |
143dcec2 | 1568 | |
58f9b0b0 | 1569 | dn = dev->of_node; |
e10fa773 | 1570 | if (!dn) |
143dcec2 | 1571 | return -ENODEV; |
7eff2e7a | 1572 | cp = of_get_property(dn, "compatible", NULL); |
143dcec2 OH |
1573 | if (!cp) |
1574 | return -ENODEV; | |
1575 | ||
7eff2e7a | 1576 | add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); |
143dcec2 OH |
1577 | return 0; |
1578 | } | |
1579 | ||
a9803497 | 1580 | struct bus_type vio_bus_type = { |
1da177e4 | 1581 | .name = "vio", |
e10fa773 | 1582 | .dev_attrs = vio_dev_attrs, |
312c004d | 1583 | .uevent = vio_hotplug, |
1da177e4 | 1584 | .match = vio_bus_match, |
2f53a80f RK |
1585 | .probe = vio_bus_probe, |
1586 | .remove = vio_bus_remove, | |
1da177e4 | 1587 | }; |
e10fa773 SR |
1588 | |
1589 | /** | |
1590 | * vio_get_attribute - get attribute for virtual device | |
1591 | * @vdev: The vio device to get the property from. | |
1592 | * @which: The property/attribute to be extracted. | |
1593 | * @length: Pointer to length of returned data size (unused if NULL). | |
1594 | * | |
e2eb6392 | 1595 | * Calls of_get_property() to return the value of the |
e10fa773 SR |
1596 | * attribute specified by @which. | |
1597 | */ | |
1598 | const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) | |
1599 | { | |
58f9b0b0 | 1600 | return of_get_property(vdev->dev.of_node, which, length); |
e10fa773 SR |
1601 | } |
1602 | EXPORT_SYMBOL(vio_get_attribute); | |
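Drivers typically use this during probe to pull firmware-supplied configuration out of the node; the ibmveth driver, for example, reads its MAC address through this call. A sketch with a hypothetical helper name:

	#include <linux/if_ether.h>
	#include <linux/string.h>

	static int example_read_mac(struct vio_dev *vdev, u8 *mac)
	{
		const unsigned char *addr;
		int len;

		addr = vio_get_attribute(vdev, "local-mac-address", &len);
		if (!addr || len < ETH_ALEN)
			return -EINVAL;

		memcpy(mac, addr, ETH_ALEN);
		return 0;
	}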
c7f0e8cb SR |
1603 | |
1604 | #ifdef CONFIG_PPC_PSERIES | |
1605 | /* vio_find_name() - internal because only vio.c knows how we formatted the | |
1606 | * kobject name | |
c7f0e8cb | 1607 | */ |
c847c853 | 1608 | static struct vio_dev *vio_find_name(const char *name) |
c7f0e8cb | 1609 | { |
c847c853 | 1610 | struct device *found; |
c7f0e8cb | 1611 | |
c847c853 | 1612 | found = bus_find_device_by_name(&vio_bus_type, NULL, name); |
c7f0e8cb SR |
1613 | if (!found) |
1614 | return NULL; | |
1615 | ||
c847c853 | 1616 | return to_vio_dev(found); |
c7f0e8cb SR |
1617 | } |
1618 | ||
1619 | /** | |
1620 | * vio_find_node - find an already-registered vio_dev | |
1621 | * @vnode: device_node of the virtual device we're looking for | |
1622 | */ | |
1623 | struct vio_dev *vio_find_node(struct device_node *vnode) | |
1624 | { | |
a7f67bdf | 1625 | const uint32_t *unit_address; |
aab0d375 | 1626 | char kobj_name[20]; |
f2ab6219 KY |
1627 | struct device_node *vnode_parent; |
1628 | const char *dev_type; | |
1629 | ||
1630 | vnode_parent = of_get_parent(vnode); | |
1631 | if (!vnode_parent) | |
1632 | return NULL; | |
1633 | ||
1634 | dev_type = of_get_property(vnode_parent, "device_type", NULL); | |
1635 | of_node_put(vnode_parent); | |
1636 | if (!dev_type) | |
1637 | return NULL; | |
c7f0e8cb SR |
1638 | |
1639 | /* construct the kobject name from the device node */ | |
f2ab6219 KY |
1640 | if (!strcmp(dev_type, "vdevice")) { |
1641 | unit_address = of_get_property(vnode, "reg", NULL); | |
1642 | if (!unit_address) | |
1643 | return NULL; | |
1644 | snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); | |
1645 | } else if (!strcmp(dev_type, "ibm,platform-facilities")) | |
1646 | snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); | |
1647 | else | |
c7f0e8cb | 1648 | return NULL; |
c7f0e8cb SR |
1649 | |
1650 | return vio_find_name(kobj_name); | |
1651 | } | |
1652 | EXPORT_SYMBOL(vio_find_node); | |
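Because vio_find_name() goes through bus_find_device_by_name(), the returned device carries an extra reference that the caller must drop. A sketch of the usual hot-unplug pattern (the helper name is hypothetical; the pseries DLPAR code does something similar):

	static void example_remove_by_node(struct device_node *dn)
	{
		struct vio_dev *vdev = vio_find_node(dn);

		if (!vdev)
			return;

		vio_unregister_device(vdev);
		put_device(&vdev->dev);	/* drop bus_find_device_by_name()'s ref */
	}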
1653 | ||
1654 | int vio_enable_interrupts(struct vio_dev *dev) | |
1655 | { | |
1656 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); | |
1657 | if (rc != H_SUCCESS) | |
1658 | printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); | |
1659 | return rc; | |
1660 | } | |
1661 | EXPORT_SYMBOL(vio_enable_interrupts); | |
1662 | ||
1663 | int vio_disable_interrupts(struct vio_dev *dev) | |
1664 | { | |
1665 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); | |
1666 | if (rc != H_SUCCESS) | |
1667 | printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); | |
1668 | return rc; | |
1669 | } | |
1670 | EXPORT_SYMBOL(vio_disable_interrupts); | |
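These two hcall wrappers are normally used as an interrupt-mitigation pair: mask in the hard handler, drain the queue, then unmask. A sketch of that pattern; the handler name and the processing step are hypothetical:

	#include <linux/interrupt.h>

	static irqreturn_t example_vio_irq(int irq, void *dev_instance)
	{
		struct vio_dev *vdev = dev_instance;

		vio_disable_interrupts(vdev);
		/* ... drain the command/response queue here ... */
		vio_enable_interrupts(vdev);

		return IRQ_HANDLED;
	}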
1671 | #endif /* CONFIG_PPC_PSERIES */ |