/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/starfire.h>

#include "iommu_common.h"

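/* All DVMA addresses handed out to SBUS devices fall inside a window
 * starting at MAP_BASE.  An IOPTE's index in the page table is
 * recovered as ((dvma - MAP_BASE) >> IO_PAGE_SHIFT), the inverse of
 * the address computation done at map time below.
 */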
#define MAP_BASE        ((u32)0xc0000000)

struct sbus_iommu_arena {
        unsigned long   *map;
        unsigned int    hint;
        unsigned int    limit;
};

struct sbus_iommu {
        spinlock_t              lock;

        struct sbus_iommu_arena arena;

        iopte_t                 *page_table;
        unsigned long           strbuf_regs;
        unsigned long           iommu_regs;
        unsigned long           sbus_control_reg;

        volatile unsigned long  strbuf_flushflag;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE     0x2400UL
#define IOMMU_CONTROL   (0x2400UL - 0x2400UL)   /* IOMMU control register */
#define IOMMU_TSBBASE   (0x2408UL - 0x2400UL)   /* TSB base address register */
#define IOMMU_FLUSH     (0x2410UL - 0x2400UL)   /* IOMMU flush register */
#define IOMMU_VADIAG    (0x4400UL - 0x2400UL)   /* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP    (0x4408UL - 0x2400UL)   /* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG   (0x4500UL - 0x2400UL)   /* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG   (0x4580UL - 0x2400UL)   /* TLB tag diagnostics */
#define IOMMU_DRAMDIAG  (0x4600UL - 0x2400UL)   /* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID        (1UL << 30UL)

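/* Invalidate the IOMMU TLB by zeroing each of its 16 tag diagnostic
 * entries; the follow-up read from the SBUS control register forces
 * the writes to complete before we proceed.
 */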
static void __iommu_flushall(struct sbus_iommu *iommu)
{
        unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
        int entry;

        for (entry = 0; entry < 16; entry++) {
                upa_writeq(0, tag);
                tag += 8UL;
        }
        upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE    0x2800UL
#define STRBUF_CONTROL  (0x2800UL - 0x2800UL)   /* Control */
#define STRBUF_PFLUSH   (0x2808UL - 0x2800UL)   /* Page flush/invalidate */
#define STRBUF_FSYNC    (0x2810UL - 0x2800UL)   /* Flush synchronization */
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL)   /* data RAM diagnostic */
#define STRBUF_ERRDIAG  (0x5400UL - 0x2800UL)   /* error status diagnostics */
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL)   /* Page tag diagnostics */
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL)   /* Line tag diagnostics */

#define STRBUF_TAG_VALID        0x02UL

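/* Flush streaming cache state for [base, base + npages) pages.  Each
 * write to STRBUF_PFLUSH queues the invalidation of one IO page.  For
 * directions where the device may have left dirty data in the
 * streaming cache, we then arm STRBUF_FSYNC with the physical address
 * of strbuf_flushflag; the hardware writes that word non-zero once
 * all queued flushes have drained, and we poll it with a bounded
 * timeout.
 */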
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
        unsigned long n;
        int limit;

        n = npages;
        while (n--)
                upa_writeq(base + (n << IO_PAGE_SHIFT),
                           iommu->strbuf_regs + STRBUF_PFLUSH);

        /* If the device could not possibly have put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == SBUS_DMA_TODEVICE)
                return;

        iommu->strbuf_flushflag = 0UL;

        /* Whoopee cushion!  Point the hardware at our flush flag; it
         * will write it non-zero when all pending flushes complete.
         */
        upa_writeq(__pa(&iommu->strbuf_flushflag),
                   iommu->strbuf_regs + STRBUF_FSYNC);
        upa_readq(iommu->sbus_control_reg);

        limit = 100000;
        while (iommu->strbuf_flushflag == 0UL) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] npages[%ld]\n",
                       base, npages);
}

/* Based largely upon the ppc64 iommu allocator. */
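/* Allocate npages consecutive IOPTEs from the arena bitmap.  The scan
 * starts at the rotating hint; if it runs off the end, we flush the
 * IOMMU TLB before rescanning from the bottom (so translations for
 * previously freed entries cannot linger) and make exactly one more
 * pass before giving up.
 */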
static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
{
        struct sbus_iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        __iommu_flushall(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
{
        unsigned long tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("SBUS_IOMMU: Error, kzalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tsbbase;
        memset(iommu->page_table, 0, tsbsize);
}

static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
{
        long entry;

        entry = sbus_arena_alloc(iommu, npages);
        if (unlikely(entry < 0))
                return NULL;

        return iommu->page_table + entry;
}

static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
{
        sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

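/* Consistent ("coherent") allocations: physically contiguous pages
 * mapped valid, cacheable, and writable in the IOMMU.  Note the
 * IOPTEs written here do not set IOPTE_STBUF, so consistent memory
 * bypasses the streaming cache entirely and never needs a flush.
 */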
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = sdev->bus->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dvma_addr = (MAP_BASE +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = sdev->bus->iommu;
        iopte = iommu->page_table +
                ((dvma - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        free_npages(iommu, dvma - MAP_BASE, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

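/* Streaming mappings: these IOPTEs set IOPTE_STBUF, routing DVMA
 * through the streaming cache, so unmap and sync-for-cpu must flush
 * that cache.  IOPTE_WRITE is withheld for SBUS_DMA_TODEVICE mappings
 * so the device cannot scribble on buffers it should only read.
 */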
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                BUG();

        bus_addr = (MAP_BASE +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        iopte_t *base;
        unsigned long flags, npages, i;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

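/* Write the IOPTEs backing a scatterlist.  prepare_sg() has already
 * coalesced physically adjacent entries into 'nused' DVMA segments
 * (recorded in dma_address/dma_length); here we walk the original
 * 'nelems' entries again, emitting one IOPTE per IO page and starting
 * a fresh mapping each time the physical address crosses an IO page
 * boundary.
 */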
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

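/* Map a scatterlist for DVMA.  Single-entry lists take the
 * sbus_map_single() fast path; otherwise prepare_sg() (from
 * iommu_common) computes the coalesced segment layout and total page
 * count, we grab that many IOPTEs in one contiguous arena block, and
 * fill_sg() populates them.  The dma_address values produced by
 * prepare_sg() are offsets, so they get rebased onto dma_base here.
 */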
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        sbus_map_single(sdev,
                                        (page_address(sglist->page) + sglist->offset),
                                        sglist->length, direction);
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = prepare_sg(sglist, nelems);

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(base == NULL))
                BUG();

        dma_base = MAP_BASE +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Normalize DVMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        return used;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, i, npages;
        u32 bus_addr;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        iommu = sdev->bus->iommu;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages;

        iommu = sdev->bus->iommu;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, i;
        u32 bus_addr;

        iommu = sdev->bus->iommu;

        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nelems; i++) {
                if (!sglist[i].dma_length)
                        break;
        }
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}

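/* Slots 0-3 and 13-15 have per-slot configuration registers at fixed
 * offsets from the SBUS control register; other slot numbers are
 * ignored.  As encoded below, bit 14 of the register selects extended
 * (64-bit) transfer mode and bits 1-4 enable the 8/16/32/64 byte
 * burst sizes.
 */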
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        int slot = sdev->slot;
        unsigned long cfg_reg;
        u64 val;

        cfg_reg = iommu->sbus_control_reg;
        switch (slot) {
        case 0:
                cfg_reg += 0x20UL;
                break;
        case 1:
                cfg_reg += 0x28UL;
                break;
        case 2:
                cfg_reg += 0x30UL;
                break;
        case 3:
                cfg_reg += 0x38UL;
                break;
        case 13:
                cfg_reg += 0x40UL;
                break;
        case 14:
                cfg_reg += 0x48UL;
                break;
        case 15:
                cfg_reg += 0x50UL;
                break;

        default:
                return;
        }

        val = upa_readq(cfg_reg);
        if (val & (1UL << 14UL)) {
                /* Extended transfer mode already enabled. */
                return;
        }

        val |= (1UL << 14UL);

        if (bursts & DMA_BURST8)
                val |= (1UL << 1UL);
        if (bursts & DMA_BURST16)
                val |= (1UL << 2UL);
        if (bursts & DMA_BURST32)
                val |= (1UL << 3UL);
        if (bursts & DMA_BURST64)
                val |= (1UL << 4UL);
        upa_writeq(val, cfg_reg);
}

/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0        0x2c04UL
#define SYSIO_IMAP_SLOT1        0x2c0cUL
#define SYSIO_IMAP_SLOT2        0x2c14UL
#define SYSIO_IMAP_SLOT3        0x2c1cUL
#define SYSIO_IMAP_SCSI         0x3004UL
#define SYSIO_IMAP_ETH          0x300cUL
#define SYSIO_IMAP_BPP          0x3014UL
#define SYSIO_IMAP_AUDIO        0x301cUL
#define SYSIO_IMAP_PFAIL        0x3024UL
#define SYSIO_IMAP_KMS          0x302cUL
#define SYSIO_IMAP_FLPY         0x3034UL
#define SYSIO_IMAP_SHW          0x303cUL
#define SYSIO_IMAP_KBD          0x3044UL
#define SYSIO_IMAP_MS           0x304cUL
#define SYSIO_IMAP_SER          0x3054UL
#define SYSIO_IMAP_TIM0         0x3064UL
#define SYSIO_IMAP_TIM1         0x306cUL
#define SYSIO_IMAP_UE           0x3074UL
#define SYSIO_IMAP_CE           0x307cUL
#define SYSIO_IMAP_SBERR        0x3084UL
#define SYSIO_IMAP_PMGMT        0x308cUL
#define SYSIO_IMAP_GFX          0x3094UL
#define SYSIO_IMAP_EUPA         0x309cUL

#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
        /* SBUS Slot 0 --> 3, level 1 --> 7 */
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

        /* Onboard devices (not relevant/used on SunFire). */
        SYSIO_IMAP_SCSI,
        SYSIO_IMAP_ETH,
        SYSIO_IMAP_BPP,
        bogon,
        SYSIO_IMAP_AUDIO,
        SYSIO_IMAP_PFAIL,
        bogon,
        bogon,
        SYSIO_IMAP_KMS,
        SYSIO_IMAP_FLPY,
        SYSIO_IMAP_SHW,
        SYSIO_IMAP_KBD,
        SYSIO_IMAP_MS,
        SYSIO_IMAP_SER,
        bogon,
        bogon,
        SYSIO_IMAP_TIM0,
        SYSIO_IMAP_TIM1,
        bogon,
        bogon,
        SYSIO_IMAP_UE,
        SYSIO_IMAP_CE,
        SYSIO_IMAP_SBERR,
        SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0      0x3400UL
#define SYSIO_ICLR_SLOT0        0x340cUL
#define SYSIO_ICLR_SLOT1        0x344cUL
#define SYSIO_ICLR_SLOT2        0x348cUL
#define SYSIO_ICLR_SLOT3        0x34ccUL
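/* The ICLR register block sits at a constant distance above the IMAP
 * block, so a single offset converts an IMAP pointer into its ICLR
 * counterpart.  For the external slots, sbus_build_irq() below picks
 * the per-slot ICLR explicitly instead.
 */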
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
        unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
        return imap + diff;
}

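/* Build an IRQ for a SYSIO interrupt source.  INOs below 0x20 encode
 * external SBUS interrupts as (slot << 3) | level; everything at 0x20
 * and above is an onboard device or error source indexed directly in
 * sysio_irq_offsets[].
 */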
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
        struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long imap, iclr;
        int sbus_level = 0;

        imap = sysio_irq_offsets[ino];
        if (imap == ((unsigned long)-1)) {
                prom_printf("sbus_build_irq: Bad SYSIO INO[%x]\n", ino);
                prom_halt();
        }
        imap += reg_base;

        /* SYSIO inconsistency.  For external SLOTS, we have to select
         * the right ICLR register based upon the lower SBUS irq level
         * bits.
         */
        if (ino >= 0x20) {
                iclr = sysio_imap_to_iclr(imap);
        } else {
                int sbus_slot = (ino & 0x18) >> 3;

                sbus_level = ino & 0x7;

                switch (sbus_slot) {
                case 0:
                        iclr = reg_base + SYSIO_ICLR_SLOT0;
                        break;
                case 1:
                        iclr = reg_base + SYSIO_ICLR_SLOT1;
                        break;
                case 2:
                        iclr = reg_base + SYSIO_ICLR_SLOT2;
                        break;
                default:
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
                }

                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
        return build_irq(sbus_level, iclr, imap);
}

/* Error interrupt handling. */
#define SYSIO_UE_AFSR   0x0030UL
#define SYSIO_UE_AFAR   0x0038UL
#define SYSIO_UEAFSR_PPIO       0x8000000000000000UL /* Primary PIO cause         */
#define SYSIO_UEAFSR_PDRD       0x4000000000000000UL /* Primary DVMA read cause   */
#define SYSIO_UEAFSR_PDWR       0x2000000000000000UL /* Primary DVMA write cause  */
#define SYSIO_UEAFSR_SPIO       0x1000000000000000UL /* Secondary PIO is cause    */
#define SYSIO_UEAFSR_SDRD       0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR       0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_UEAFSR_RESV1      0x03ff000000000000UL /* Reserved                  */
#define SYSIO_UEAFSR_DOFF       0x0000e00000000000UL /* Doubleword Offset         */
#define SYSIO_UEAFSR_SIZE       0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
#define SYSIO_UEAFSR_MID        0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2      0x0000001fffffffffUL /* Reserved                  */
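/* The UE/CE/SBUS error handlers below all follow the same pattern:
 * read the AFSR and AFAR to latch the error state, write the primary
 * and secondary cause bits back to the AFSR to clear them, then
 * decode and log what happened.
 */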
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_UE_AFSR;
        afar_reg = reg_base + SYSIO_UE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
                 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_UEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_UEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_UEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));
        printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_UEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_UEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_UEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_UEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_CE_AFSR   0x0040UL
#define SYSIO_CE_AFAR   0x0048UL
#define SYSIO_CEAFSR_PPIO       0x8000000000000000UL /* Primary PIO cause         */
#define SYSIO_CEAFSR_PDRD       0x4000000000000000UL /* Primary DVMA read cause   */
#define SYSIO_CEAFSR_PDWR       0x2000000000000000UL /* Primary DVMA write cause  */
#define SYSIO_CEAFSR_SPIO       0x1000000000000000UL /* Secondary PIO cause       */
#define SYSIO_CEAFSR_SDRD       0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR       0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_CEAFSR_RESV1      0x0300000000000000UL /* Reserved                  */
#define SYSIO_CEAFSR_ESYND      0x00ff000000000000UL /* Syndrome Bits             */
#define SYSIO_CEAFSR_DOFF       0x0000e00000000000UL /* Double Offset             */
#define SYSIO_CEAFSR_SIZE       0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
#define SYSIO_CEAFSR_MID        0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2      0x0000001fffffffffUL /* Reserved                  */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_CE_AFSR;
        afar_reg = reg_base + SYSIO_CE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
                 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_CEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_CEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_CEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));

        /* XXX Use syndrome and afar to print out module string just like
         * XXX UDB CE trap handler does... -DaveM
         */
        printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
               (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_CEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_CEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_CEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_CEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_SBUS_AFSR         0x2010UL
#define SYSIO_SBUS_AFAR         0x2018UL
#define SYSIO_SBAFSR_PLE        0x8000000000000000UL /* Primary Late PIO Error    */
#define SYSIO_SBAFSR_PTO        0x4000000000000000UL /* Primary SBUS Timeout      */
#define SYSIO_SBAFSR_PBERR      0x2000000000000000UL /* Primary SBUS Error ACK    */
#define SYSIO_SBAFSR_SLE        0x1000000000000000UL /* Secondary Late PIO Error  */
#define SYSIO_SBAFSR_STO        0x0800000000000000UL /* Secondary SBUS Timeout    */
#define SYSIO_SBAFSR_SBERR      0x0400000000000000UL /* Secondary SBUS Error ACK  */
#define SYSIO_SBAFSR_RESV1      0x03ff000000000000UL /* Reserved                  */
#define SYSIO_SBAFSR_RD         0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2      0x0000600000000000UL /* Reserved                  */
#define SYSIO_SBAFSR_SIZE       0x00001c0000000000UL /* Size of transfer          */
#define SYSIO_SBAFSR_MID        0x000003e000000000UL /* MID causing the error     */
#define SYSIO_SBAFSR_RESV3      0x0000001fffffffffUL /* Reserved                  */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long afsr_reg, afar_reg, reg_base;
        unsigned long afsr, afar, error_bits;
        int reported;

        reg_base = iommu->sbus_control_reg - 0x2000UL;
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
        afar_reg = reg_base + SYSIO_SBUS_AFAR;

        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
                 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
               sbus->portid,
               (((error_bits & SYSIO_SBAFSR_PLE) ?
                 "Late PIO Error" :
                 ((error_bits & SYSIO_SBAFSR_PTO) ?
                  "Time Out" :
                  ((error_bits & SYSIO_SBAFSR_PBERR) ?
                   "Error Ack" : "???")))),
               (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
        printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_SBAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_SBAFSR_SLE) {
                reported++;
                printk("(Late PIO Error)");
        }
        if (afsr & SYSIO_SBAFSR_STO) {
                reported++;
                printk("(Time Out)");
        }
        if (afsr & SYSIO_SBAFSR_SBERR) {
                reported++;
                printk("(Error Ack)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        /* XXX check iommu/strbuf for further error status XXX */

        return IRQ_HANDLED;
}

#define ECC_CONTROL     0x0020UL
#define SYSIO_ECNTRL_ECCEN      0x8000000000000000UL /* Enable ECC Checking  */
#define SYSIO_ECNTRL_UEEN       0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN       0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO            0x34
#define SYSIO_CE_INO            0x35
#define SYSIO_SBUSERR_INO       0x36

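/* Hook up the three error interrupt sources, then enable ECC checking
 * plus UE/CE reporting in the ECC control register and the SBUS error
 * interrupt enable bit in the control register.  Failure to register
 * any of the handlers is fatal at boot.
 */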
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned int irq;
        u64 control;

        irq = sbus_build_irq(sbus, SYSIO_UE_INO);
        if (request_irq(irq, sysio_ue_handler,
                        IRQF_SHARED, "SYSIO UE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_CE_INO);
        if (request_irq(irq, sysio_ce_handler,
                        IRQF_SHARED, "SYSIO CE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
        if (request_irq(irq, sysio_sbus_error_handler,
                        IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        /* Now turn the error interrupts on and also enable ECC checking. */
        upa_writeq((SYSIO_ECNTRL_ECCEN |
                    SYSIO_ECNTRL_UEEN |
                    SYSIO_ECNTRL_CEEN),
                   reg_base + ECC_CONTROL);

        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
        upa_writeq(control, iommu->sbus_control_reg);
}

/* Boot time initialization. */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
        const struct linux_prom64_registers *pr;
        struct device_node *dp;
        struct sbus_iommu *iommu;
        unsigned long regs;
        u64 control;
        int i;

        dp = of_find_node_by_phandle(__node);

        sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

        pr = of_get_property(dp, "reg", NULL);
        if (!pr) {
                prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        regs = pr->phys_addr;

        iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
        if (iommu == NULL) {
                prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        /* Align on E$ line boundary. */
        iommu = (struct sbus_iommu *)
                (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
                 ~(SMP_CACHE_BYTES - 1UL));

        memset(iommu, 0, sizeof(*iommu));

        /* Setup spinlock. */
        spin_lock_init(&iommu->lock);

        /* Init register offsets. */
        iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
        iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
        iommu->sbus_control_reg = regs + 0x2000UL;

        /* Link into SYSIO software state. */
        sbus->iommu = iommu;

        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);

        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        sbus_iommu_table_init(iommu, IO_TSB_SIZE);

        control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
        control = ((7UL << 16UL) |
                   (0UL << 2UL) |
                   (1UL << 1UL) |
                   (1UL << 0UL));
        upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
                unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
                unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;
                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
        upa_readq(iommu->sbus_control_reg);

        /* Give the TSB to SYSIO. */
        upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);

        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
        upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;

                ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
                ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;

                upa_writeq(0UL, ptag);
                upa_writeq(0UL, ltag);
        }

        /* Enable DVMA arbitration for all devices/slots. */
        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x3fUL;
        upa_writeq(control, iommu->sbus_control_reg);

        /* Now some Xfire specific grot... */
        if (this_is_starfire)
                starfire_hookup(sbus->portid);

        sysio_register_error_handlers(sbus);
}

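/* Resolve a device's PROM "interrupts" property into a Linux IRQ.
 * On-slot interrupt levels come out of the PROM as bare levels below
 * 0x20, so they are folded into the (slot << 3) | level INO encoding
 * expected by sbus_build_irq().
 */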
void sbus_fill_device_irq(struct sbus_dev *sdev)
{
        struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
        const struct linux_prom_irqs *irqs;

        irqs = of_get_property(dp, "interrupts", NULL);
        if (!irqs) {
                sdev->irqs[0] = 0;
                sdev->num_irqs = 0;
        } else {
                unsigned int pri = irqs[0].pri;

                sdev->num_irqs = 1;
                if (pri < 0x20)
                        pri += sdev->slot * 8;

                sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
        }
}

void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
        sbus_iommu_init(dp->node, sbus);
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
}

int __init sbus_arch_preinit(void)
{
        return 0;
}

void __init sbus_arch_postinit(void)
{
        extern void firetruck_init(void);

        firetruck_init();
}