/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
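/* With the 8K base pages used by the 64-bit sparc kernel this works
 * out to 1024 slots, i.e. one page of u64 physical addresses per cpu,
 * so a completely full batch maps 1024 IO pages in one flush.
 */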
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
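/* The mapping paths below all drive the per-cpu batch with the same
 * pattern (a sketch; the real callers also check iommu_batch_add()
 * and iommu_batch_end() for failure):
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, prot, entry);
 *	for each IO page in the request
 *		iommu_batch_add(phys_page);
 *	iommu_batch_end();
 *	local_irq_restore(flags);
 */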
/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
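/* pci_sun4v_iommu_map() is allowed to map fewer pages than requested;
 * it returns the number of TSB entries it actually installed.  That is
 * why iommu_batch_flush() keeps re-issuing the hypervisor call with
 * the remaining pages until the whole batch has been accepted.
 */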
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	/* If the new entry simply extends the current run, keep batching. */
	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}
/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}
/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	/* first_page may already have been converted with __pa(), so
	 * free via the struct page to get a valid virtual address on
	 * both error paths.
	 */
	free_pages((unsigned long) page_address(page), order);
	return NULL;
}
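/* The DVMA address returned above is simply the arena base plus the
 * allocated TSB index scaled by the IO page size; e.g. with
 * page_table_map_base at 0x80000000, entry 3 and 8K IO pages the
 * device sees 0x80006000.
 */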
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}
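/* The map attributes passed to the hypervisor are from the device's
 * point of view: HV_PCI_MAP_ATTR_READ (device may read memory) is
 * always set, and HV_PCI_MAP_ATTR_WRITE (device may write memory) is
 * added unless the transfer is strictly DMA_TO_DEVICE.
 */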
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
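/* dma_4v_map_sg() merges adjacent scatterlist entries into one DMA
 * segment only when the new allocation is contiguous with the previous
 * one, the combined length stays within dma_get_max_seg_size(), and
 * the merge would not cross the device's DMA segment boundary.
 */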
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		do {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		} while (npages != 0);

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}
static const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};
static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;

	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
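/* Firmware may leave live IOMMU translations behind, e.g. for the
 * OBP console.  Translations whose target page is in the kernel's
 * available physical memory are torn down; the others are marked busy
 * in the arena bitmap so the allocator never hands them out.
 */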
static int __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTx sources only */
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
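/* For illustration only: a hypothetical set of helpers (not used by
 * this driver, which is why they are compiled out) showing how the
 * requester id packs bus/device/function in the usual PCI layout.
 */
#if 0
static unsigned int msiq_req_id_bus(u64 req_id)
{
	return (req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
}

static unsigned int msiq_req_id_device(u64 req_id)
{
	return (req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
}

static unsigned int msiq_req_id_func(u64 req_id)
{
	return (req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
}
#endif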
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}
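/* The MSI queue head is kept as a byte offset into the queue rather
 * than an entry index, which is why it advances by the entry size and
 * wraps to zero once it reaches msiq_ent_count entries' worth of
 * bytes.
 */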
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;

	return 0;
}
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
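/* Each queue entry is eight u64 words (64 bytes), so a PBM reporting
 * 1024 entries per queue needs 64KB per queue; the allocation above
 * grabs all of a PBM's queues as one physically contiguous block.
 */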
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
static int __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				     struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm);

	return 0;
}
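/* Bit 0x40 of the device handle distinguishes the two PBMs (leaves)
 * of one controller, so when the sibling leaf (devhandle ^ 0x40) is
 * already registered the probe below reuses its pci_controller_info
 * instead of allocating a new one.
 */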
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i;

	dp = op->node;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		return -ENODEV;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40))
			return pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			return -ENOMEM;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		printk(KERN_ERR PFX "Could not allocate pci_controller_info\n");
		goto out_free;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm A iommu\n");
		goto out_free;
	}

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm B iommu\n");
		goto out_free;
	}

	p->pbm_B.iommu = iommu;

	return pci_sun4v_pbm_init(p, dp, devhandle);

out_free:
	if (p) {
		kfree(p->pbm_A.iommu);
		kfree(p->pbm_B.iommu);
		kfree(p);
	}
	return -ENOMEM;
}
static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.name		= DRIVER_NAME,
	.match_table	= pci_sun4v_match,
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);