/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

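/* Per-CPU state used to batch up IOTSB mapping requests, so that a whole
 * list of physical pages can be handed to the hypervisor in one
 * pci_sun4v_iommu_map() call instead of one call per page.
 */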
struct iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

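/* Simple bitmap allocator for IOTSB entries.  The search starts at the
 * rotating hint and wraps around to the beginning of the map once
 * (pass < 1) before giving up.
 */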
static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

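/* Consistent (coherent) DMA allocations: grab a physically contiguous
 * buffer, carve the needed IOTSB entries out of the arena, then map each
 * IO page read/write through the batching helpers above.
 */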
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = pdev->dev.archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

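/* fill_sg() walks a scatterlist whose DMA segments have already been
 * coalesced (by prepare_sg() in pci_4v_map_sg() below) into 'nused'
 * entries, and programs one IOTSB entry per IO page via the per-CPU
 * batching helpers.
 */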
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

const struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

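/* Scan the IOTSB for translations the firmware (OBP) left behind.
 * Mappings that point into memory the kernel owns are torn down;
 * everything else is marked busy in the arena so it is never reused.
 */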
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

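/* Size the IOTSB and the software arena from the "virtual-dma" OBP
 * property (base and size of the IOMMU-translated DVMA window), falling
 * back to a 2GB window at 0x80000000 when the property is absent.
 */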
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int tsbsize;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	}

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

/* For now this just runs as a pre-handler for the real interrupt handler.
 * So we just walk through the queue and ACK all the entries, update the
 * head pointer, and return.
 *
 * In the longer term it would be nice to do something more integrated
 * wherein we can pass in some of this MSI info to the drivers.  This
 * would be most useful for PCIe fabric error messages, although we could
 * invoke those directly from the loop here in order to pass the info around.
 */
static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
{
	struct pci_pbm_info *pbm = data1;
	struct pci_sun4v_msiq_entry *base, *ep;
	unsigned long msiqid, orig_head, head, type, err;

	msiqid = (unsigned long) data2;

	head = 0xdeadbeef;
	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
	if (unlikely(err))
		goto hv_error_get;

	if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
		goto bad_offset;

	head /= sizeof(struct pci_sun4v_msiq_entry);
	orig_head = head;
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				   (pbm->msiq_ent_count *
				    sizeof(struct pci_sun4v_msiq_entry))));
	ep = &base[head];
	while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
		type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
		if (unlikely(type != MSIQ_TYPE_MSI32 &&
			     type != MSIQ_TYPE_MSI64))
			goto bad_type;

		pci_sun4v_msi_setstate(pbm->devhandle,
				       ep->msi_data /* msi_num */,
				       HV_MSISTATE_IDLE);

		/* Clear the entry.  */
		ep->version_type &= ~MSIQ_TYPE_MASK;

		/* Go to next entry in ring.  */
		head++;
		if (head >= pbm->msiq_ent_count)
			head = 0;
		ep = &base[head];
	}

	if (likely(head != orig_head)) {
		/* ACK entries by updating head pointer.  */
		head *= sizeof(struct pci_sun4v_msiq_entry);
		err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
		if (unlikely(err))
			goto hv_error_set;
	}
	return;

hv_error_set:
	printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
	goto hv_error_cont;

hv_error_get:
	printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);

hv_error_cont:
	printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
	       pbm->devhandle, msiqid, head);
	return;

bad_offset:
	printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
	       head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
	return;

bad_type:
	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
	return;
}

static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

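/* Allocate one physically contiguous block holding every MSI event
 * queue, register each queue's base and entry count with the hypervisor,
 * then read the configuration back to verify it took effect.
 */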
static int msi_queue_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
				   struct pci_dev *pdev,
				   struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long devino, msiqid;
	struct msi_msg msg;
	int msi_num, err;

	*virt_irq_p = 0;

	msi_num = alloc_msi(pbm);
	if (msi_num < 0)
		return msi_num;

	devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
				 pbm->msiq_first_devino,
				 (pbm->msiq_first_devino +
				  pbm->msiq_num));
	err = -ENOMEM;
	if (!devino)
		goto out_err;

	msiqid = ((devino - pbm->msiq_first_devino) +
		  pbm->msiq_first);

	err = -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		goto out_err;

	if (pci_sun4v_msi_setmsiq(pbm->devhandle,
				  msi_num, msiqid,
				  (entry->msi_attrib.is_64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		goto out_err;

	if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
		goto out_err;

	pdev->dev.archdata.msi_num = msi_num;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi_num;

	set_irq_msi(*virt_irq_p, entry);
	write_msi_msg(*virt_irq_p, &msg);

	irq_install_pre_handler(*virt_irq_p,
				pci_sun4v_msi_prehandler,
				pbm, (void *) msiqid);

	return 0;

out_err:
	free_msi(pbm, msi_num);
	sun4v_destroy_msi(*virt_irq_p);
	*virt_irq_p = 0;
	return err;
}

static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
				       struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long msiqid, err;
	unsigned int msi_num;

	msi_num = pdev->dev.archdata.msi_num;
	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
	if (err) {
		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
		       pbm->name, err);
		return;
	}

	pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
	pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);

	free_msi(pbm, msi_num);

	/* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
	 * allocation.
	 */
	sun4v_destroy_msi(virt_irq);
}

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->prom_node,
				      "msi-eq-to-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->prom_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_queue_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
		       "addr64[0x%lx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
		       pbm->name,
		       pbm->msi_queues);
	}
	pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
	pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;

	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

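/* Bit 0x40 of the device handle distinguishes the two PBMs that share
 * one controller: set means PBM B, clear means PBM A.
 */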
static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

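/* Probe entry point.  If a PBM with the sibling device handle
 * (devhandle ^ 0x40) is already registered, this node is the second PBM
 * of an existing controller; otherwise allocate the controller, the
 * per-PBM iommu structs, and the per-cpu IOMMU batch page lists.
 */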
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}