]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/sparc64/kernel/sbus.c
[SPARC64]: Consolidate {sbus,pci}_iommu_arena.
[mirror_ubuntu-zesty-kernel.git] / arch / sparc64 / kernel / sbus.c
CommitLineData
1da177e4
LT
1/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
2 * sbus.c: UltraSparc SBUS controller support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/mm.h>
10#include <linux/spinlock.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14
15#include <asm/page.h>
16#include <asm/sbus.h>
17#include <asm/io.h>
18#include <asm/upa.h>
19#include <asm/cache.h>
20#include <asm/dma.h>
21#include <asm/irq.h>
25c7581b 22#include <asm/prom.h>
1da177e4
LT
23#include <asm/starfire.h>
24
25#include "iommu_common.h"
26
1da177e4
LT
27#define MAP_BASE ((u32)0xc0000000)
28
29struct sbus_iommu {
2f3a2efd 30 spinlock_t lock;
1da177e4 31
9b3627f3 32 struct iommu_arena arena;
1da177e4 33
2f3a2efd
DM
34 iopte_t *page_table;
35 unsigned long strbuf_regs;
36 unsigned long iommu_regs;
37 unsigned long sbus_control_reg;
1da177e4 38
2f3a2efd 39 volatile unsigned long strbuf_flushflag;
1da177e4
LT
40};
41
42/* Offsets from iommu_regs */
43#define SYSIO_IOMMUREG_BASE 0x2400UL
44#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
45#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
46#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
47#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
48#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
49#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
50#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
51#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
52
53#define IOMMU_DRAM_VALID (1UL << 30UL)
54
55static void __iommu_flushall(struct sbus_iommu *iommu)
56{
57 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
58 int entry;
59
60 for (entry = 0; entry < 16; entry++) {
61 upa_writeq(0, tag);
62 tag += 8UL;
63 }
64 upa_readq(iommu->sbus_control_reg);
1da177e4
LT
65}
66
67/* Offsets from strbuf_regs */
68#define SYSIO_STRBUFREG_BASE 0x2800UL
69#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
70#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
71#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
72#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
73#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
74#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
75#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
76
77#define STRBUF_TAG_VALID 0x02UL
78
7c963ad1 79static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
1da177e4 80{
4dbc30fb
DM
81 unsigned long n;
82 int limit;
83
4dbc30fb
DM
84 n = npages;
85 while (n--)
86 upa_writeq(base + (n << IO_PAGE_SHIFT),
1da177e4
LT
87 iommu->strbuf_regs + STRBUF_PFLUSH);
88
7c963ad1
DM
89 /* If the device could not have possibly put dirty data into
90 * the streaming cache, no flush-flag synchronization needs
91 * to be performed.
92 */
93 if (direction == SBUS_DMA_TODEVICE)
94 return;
95
96 iommu->strbuf_flushflag = 0UL;
97
1da177e4
LT
98 /* Whoopee cushion! */
99 upa_writeq(__pa(&iommu->strbuf_flushflag),
100 iommu->strbuf_regs + STRBUF_FSYNC);
101 upa_readq(iommu->sbus_control_reg);
4dbc30fb 102
a228dfd5 103 limit = 100000;
4dbc30fb
DM
104 while (iommu->strbuf_flushflag == 0UL) {
105 limit--;
106 if (!limit)
107 break;
a228dfd5 108 udelay(1);
4f07118f 109 rmb();
4dbc30fb
DM
110 }
111 if (!limit)
112 printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
113 "vaddr[%08x] npages[%ld]\n",
114 base, npages);
1da177e4
LT
115}
116
2f3a2efd
DM
117/* Based largely upon the ppc64 iommu allocator. */
118static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
1da177e4 119{
9b3627f3 120 struct iommu_arena *arena = &iommu->arena;
2f3a2efd
DM
121 unsigned long n, i, start, end, limit;
122 int pass;
123
124 limit = arena->limit;
125 start = arena->hint;
126 pass = 0;
127
128again:
129 n = find_next_zero_bit(arena->map, limit, start);
130 end = n + npages;
131 if (unlikely(end >= limit)) {
132 if (likely(pass < 1)) {
133 limit = start;
134 start = 0;
135 __iommu_flushall(iommu);
136 pass++;
137 goto again;
1da177e4 138 } else {
2f3a2efd
DM
139 /* Scanned the whole thing, give up. */
140 return -1;
1da177e4 141 }
2f3a2efd 142 }
1da177e4 143
2f3a2efd
DM
144 for (i = n; i < end; i++) {
145 if (test_bit(i, arena->map)) {
146 start = i + 1;
147 goto again;
1da177e4 148 }
1da177e4
LT
149 }
150
2f3a2efd
DM
151 for (i = n; i < end; i++)
152 __set_bit(i, arena->map);
153
154 arena->hint = end;
155
156 return n;
1da177e4
LT
157}
158
9b3627f3 159static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
1da177e4 160{
2f3a2efd 161 unsigned long i;
1da177e4 162
2f3a2efd
DM
163 for (i = base; i < (base + npages); i++)
164 __clear_bit(i, arena->map);
1da177e4
LT
165}
166
2f3a2efd 167static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
1da177e4 168{
2f3a2efd 169 unsigned long tsbbase, order, sz, num_tsb_entries;
1da177e4 170
2f3a2efd 171 num_tsb_entries = tsbsize / sizeof(iopte_t);
1da177e4 172
2f3a2efd
DM
173 /* Setup initial software IOMMU state. */
174 spin_lock_init(&iommu->lock);
1da177e4 175
2f3a2efd
DM
176 /* Allocate and initialize the free area map. */
177 sz = num_tsb_entries / 8;
178 sz = (sz + 7UL) & ~7UL;
179 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
180 if (!iommu->arena.map) {
181 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
182 prom_halt();
183 }
184 iommu->arena.limit = num_tsb_entries;
185
186 /* Now allocate and setup the IOMMU page table itself. */
187 order = get_order(tsbsize);
188 tsbbase = __get_free_pages(GFP_KERNEL, order);
189 if (!tsbbase) {
190 prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
191 prom_halt();
1da177e4 192 }
2f3a2efd
DM
193 iommu->page_table = (iopte_t *)tsbbase;
194 memset(iommu->page_table, 0, tsbsize);
1da177e4
LT
195}
196
2f3a2efd 197static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
1da177e4 198{
2f3a2efd 199 long entry;
1da177e4 200
2f3a2efd
DM
201 entry = sbus_arena_alloc(iommu, npages);
202 if (unlikely(entry < 0))
203 return NULL;
1da177e4 204
2f3a2efd
DM
205 return iommu->page_table + entry;
206}
1da177e4 207
2f3a2efd
DM
208static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
209{
210 sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
1da177e4
LT
211}
212
213void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
214{
1da177e4
LT
215 struct sbus_iommu *iommu;
216 iopte_t *iopte;
2f3a2efd 217 unsigned long flags, order, first_page;
1da177e4
LT
218 void *ret;
219 int npages;
220
1da177e4
LT
221 size = IO_PAGE_ALIGN(size);
222 order = get_order(size);
223 if (order >= 10)
224 return NULL;
2f3a2efd 225
f3d48f03 226 first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
1da177e4
LT
227 if (first_page == 0UL)
228 return NULL;
229 memset((char *)first_page, 0, PAGE_SIZE << order);
230
231 iommu = sdev->bus->iommu;
232
233 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd
DM
234 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
235 spin_unlock_irqrestore(&iommu->lock, flags);
236
237 if (unlikely(iopte == NULL)) {
1da177e4
LT
238 free_pages(first_page, order);
239 return NULL;
240 }
241
2f3a2efd
DM
242 *dvma_addr = (MAP_BASE +
243 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
1da177e4
LT
244 ret = (void *) first_page;
245 npages = size >> IO_PAGE_SHIFT;
2f3a2efd 246 first_page = __pa(first_page);
1da177e4 247 while (npages--) {
2f3a2efd
DM
248 iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
249 IOPTE_WRITE |
250 (first_page & IOPTE_PAGE));
251 iopte++;
1da177e4
LT
252 first_page += IO_PAGE_SIZE;
253 }
1da177e4
LT
254
255 return ret;
256}
257
258void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
259{
1da177e4 260 struct sbus_iommu *iommu;
2f3a2efd
DM
261 iopte_t *iopte;
262 unsigned long flags, order, npages;
1da177e4
LT
263
264 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
265 iommu = sdev->bus->iommu;
2f3a2efd
DM
266 iopte = iommu->page_table +
267 ((dvma - MAP_BASE) >> IO_PAGE_SHIFT);
1da177e4 268
2f3a2efd
DM
269 spin_lock_irqsave(&iommu->lock, flags);
270
271 free_npages(iommu, dvma - MAP_BASE, npages);
272
273 spin_unlock_irqrestore(&iommu->lock, flags);
1da177e4
LT
274
275 order = get_order(size);
276 if (order < 10)
277 free_pages((unsigned long)cpu, order);
278}
279
2f3a2efd 280dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
1da177e4 281{
2f3a2efd
DM
282 struct sbus_iommu *iommu;
283 iopte_t *base;
284 unsigned long flags, npages, oaddr;
285 unsigned long i, base_paddr;
286 u32 bus_addr, ret;
287 unsigned long iopte_protection;
1da177e4 288
2f3a2efd
DM
289 iommu = sdev->bus->iommu;
290
291 if (unlikely(direction == SBUS_DMA_NONE))
1da177e4
LT
292 BUG();
293
2f3a2efd
DM
294 oaddr = (unsigned long)ptr;
295 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
296 npages >>= IO_PAGE_SHIFT;
1da177e4
LT
297
298 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd 299 base = alloc_npages(iommu, npages);
1da177e4
LT
300 spin_unlock_irqrestore(&iommu->lock, flags);
301
2f3a2efd
DM
302 if (unlikely(!base))
303 BUG();
304
305 bus_addr = (MAP_BASE +
306 ((base - iommu->page_table) << IO_PAGE_SHIFT));
307 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
308 base_paddr = __pa(oaddr & IO_PAGE_MASK);
1da177e4 309
2f3a2efd
DM
310 iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
311 if (direction != SBUS_DMA_TODEVICE)
312 iopte_protection |= IOPTE_WRITE;
313
314 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
315 iopte_val(*base) = iopte_protection | base_paddr;
316
317 return ret;
1da177e4
LT
318}
319
2f3a2efd 320void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
1da177e4
LT
321{
322 struct sbus_iommu *iommu = sdev->bus->iommu;
2f3a2efd
DM
323 iopte_t *base;
324 unsigned long flags, npages, i;
325
326 if (unlikely(direction == SBUS_DMA_NONE))
327 BUG();
328
329 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
330 npages >>= IO_PAGE_SHIFT;
331 base = iommu->page_table +
332 ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
1da177e4 333
2f3a2efd 334 bus_addr &= IO_PAGE_MASK;
1da177e4
LT
335
336 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd
DM
337 sbus_strbuf_flush(iommu, bus_addr, npages, direction);
338 for (i = 0; i < npages; i++)
339 iopte_val(base[i]) = 0UL;
340 free_npages(iommu, bus_addr - MAP_BASE, npages);
1da177e4
LT
341 spin_unlock_irqrestore(&iommu->lock, flags);
342}
343
344#define SG_ENT_PHYS_ADDRESS(SG) \
345 (__pa(page_address((SG)->page)) + (SG)->offset)
346
2f3a2efd
DM
347static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
348 int nused, int nelems, unsigned long iopte_protection)
1da177e4
LT
349{
350 struct scatterlist *dma_sg = sg;
351 struct scatterlist *sg_end = sg + nelems;
352 int i;
353
354 for (i = 0; i < nused; i++) {
355 unsigned long pteval = ~0UL;
356 u32 dma_npages;
357
358 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
359 dma_sg->dma_length +
360 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
361 do {
362 unsigned long offset;
363 signed int len;
364
365 /* If we are here, we know we have at least one
366 * more page to map. So walk forward until we
367 * hit a page crossing, and begin creating new
368 * mappings from that spot.
369 */
370 for (;;) {
371 unsigned long tmp;
372
2f3a2efd 373 tmp = SG_ENT_PHYS_ADDRESS(sg);
1da177e4
LT
374 len = sg->length;
375 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
376 pteval = tmp & IO_PAGE_MASK;
377 offset = tmp & (IO_PAGE_SIZE - 1UL);
378 break;
379 }
380 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
381 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
382 offset = 0UL;
383 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
384 break;
385 }
386 sg++;
387 }
388
2f3a2efd 389 pteval = iopte_protection | (pteval & IOPTE_PAGE);
1da177e4
LT
390 while (len > 0) {
391 *iopte++ = __iopte(pteval);
392 pteval += IO_PAGE_SIZE;
393 len -= (IO_PAGE_SIZE - offset);
394 offset = 0;
395 dma_npages--;
396 }
397
398 pteval = (pteval & IOPTE_PAGE) + len;
399 sg++;
400
401 /* Skip over any tail mappings we've fully mapped,
402 * adjusting pteval along the way. Stop when we
403 * detect a page crossing event.
404 */
405 while (sg < sg_end &&
406 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
407 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
408 ((pteval ^
409 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
410 pteval += sg->length;
411 sg++;
412 }
413 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
414 pteval = ~0UL;
415 } while (dma_npages != 0);
416 dma_sg++;
417 }
418}
419
2f3a2efd 420int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
1da177e4 421{
2f3a2efd
DM
422 struct sbus_iommu *iommu;
423 unsigned long flags, npages, iopte_protection;
424 iopte_t *base;
1da177e4
LT
425 u32 dma_base;
426 struct scatterlist *sgtmp;
427 int used;
1da177e4
LT
428
429 /* Fast path single entry scatterlists. */
2f3a2efd
DM
430 if (nelems == 1) {
431 sglist->dma_address =
1da177e4 432 sbus_map_single(sdev,
2f3a2efd
DM
433 (page_address(sglist->page) + sglist->offset),
434 sglist->length, direction);
435 sglist->dma_length = sglist->length;
1da177e4
LT
436 return 1;
437 }
438
2f3a2efd
DM
439 iommu = sdev->bus->iommu;
440
441 if (unlikely(direction == SBUS_DMA_NONE))
442 BUG();
443
444 npages = prepare_sg(sglist, nelems);
1da177e4
LT
445
446 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd
DM
447 base = alloc_npages(iommu, npages);
448 spin_unlock_irqrestore(&iommu->lock, flags);
449
450 if (unlikely(base == NULL))
451 BUG();
452
453 dma_base = MAP_BASE +
454 ((base - iommu->page_table) << IO_PAGE_SHIFT);
1da177e4
LT
455
456 /* Normalize DVMA addresses. */
2f3a2efd 457 used = nelems;
1da177e4 458
2f3a2efd 459 sgtmp = sglist;
1da177e4
LT
460 while (used && sgtmp->dma_length) {
461 sgtmp->dma_address += dma_base;
462 sgtmp++;
463 used--;
464 }
2f3a2efd 465 used = nelems - used;
1da177e4 466
2f3a2efd
DM
467 iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
468 if (direction != SBUS_DMA_TODEVICE)
469 iopte_protection |= IOPTE_WRITE;
470
471 fill_sg(base, sglist, used, nelems, iopte_protection);
1da177e4 472
1da177e4 473#ifdef VERIFY_SG
2f3a2efd 474 verify_sglist(sglist, nelems, base, npages);
1da177e4 475#endif
1da177e4
LT
476
477 return used;
1da177e4
LT
478}
479
2f3a2efd 480void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
1da177e4 481{
1da177e4 482 struct sbus_iommu *iommu;
2f3a2efd
DM
483 iopte_t *base;
484 unsigned long flags, i, npages;
485 u32 bus_addr;
1da177e4 486
2f3a2efd
DM
487 if (unlikely(direction == SBUS_DMA_NONE))
488 BUG();
1da177e4 489
2f3a2efd
DM
490 iommu = sdev->bus->iommu;
491
492 bus_addr = sglist->dma_address & IO_PAGE_MASK;
493
494 for (i = 1; i < nelems; i++)
495 if (sglist[i].dma_length == 0)
1da177e4 496 break;
1da177e4 497 i--;
2f3a2efd
DM
498 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
499 bus_addr) >> IO_PAGE_SHIFT;
500
501 base = iommu->page_table +
502 ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
1da177e4 503
1da177e4 504 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd
DM
505 sbus_strbuf_flush(iommu, bus_addr, npages, direction);
506 for (i = 0; i < npages; i++)
507 iopte_val(base[i]) = 0UL;
508 free_npages(iommu, bus_addr - MAP_BASE, npages);
1da177e4
LT
509 spin_unlock_irqrestore(&iommu->lock, flags);
510}
511
2f3a2efd 512void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
1da177e4 513{
2f3a2efd
DM
514 struct sbus_iommu *iommu;
515 unsigned long flags, npages;
516
517 iommu = sdev->bus->iommu;
1da177e4 518
2f3a2efd
DM
519 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
520 npages >>= IO_PAGE_SHIFT;
521 bus_addr &= IO_PAGE_MASK;
1da177e4
LT
522
523 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd 524 sbus_strbuf_flush(iommu, bus_addr, npages, direction);
1da177e4
LT
525 spin_unlock_irqrestore(&iommu->lock, flags);
526}
527
528void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
529{
530}
531
2f3a2efd 532void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
1da177e4 533{
2f3a2efd
DM
534 struct sbus_iommu *iommu;
535 unsigned long flags, npages, i;
536 u32 bus_addr;
537
538 iommu = sdev->bus->iommu;
1da177e4 539
2f3a2efd
DM
540 bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
541 for (i = 0; i < nelems; i++) {
542 if (!sglist[i].dma_length)
1da177e4
LT
543 break;
544 }
545 i--;
2f3a2efd
DM
546 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
547 - bus_addr) >> IO_PAGE_SHIFT;
1da177e4
LT
548
549 spin_lock_irqsave(&iommu->lock, flags);
2f3a2efd 550 sbus_strbuf_flush(iommu, bus_addr, npages, direction);
1da177e4
LT
551 spin_unlock_irqrestore(&iommu->lock, flags);
552}
553
/* Intentionally a no-op; kept for DMA API symmetry with the
 * _for_cpu variant above.
 */
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
557
558/* Enable 64-bit DVMA mode for the given device. */
559void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
560{
561 struct sbus_iommu *iommu = sdev->bus->iommu;
562 int slot = sdev->slot;
563 unsigned long cfg_reg;
564 u64 val;
565
566 cfg_reg = iommu->sbus_control_reg;
567 switch (slot) {
568 case 0:
569 cfg_reg += 0x20UL;
570 break;
571 case 1:
572 cfg_reg += 0x28UL;
573 break;
574 case 2:
575 cfg_reg += 0x30UL;
576 break;
577 case 3:
578 cfg_reg += 0x38UL;
579 break;
580 case 13:
581 cfg_reg += 0x40UL;
582 break;
583 case 14:
584 cfg_reg += 0x48UL;
585 break;
586 case 15:
587 cfg_reg += 0x50UL;
588 break;
589
590 default:
591 return;
592 };
593
594 val = upa_readq(cfg_reg);
595 if (val & (1UL << 14UL)) {
596 /* Extended transfer mode already enabled. */
597 return;
598 }
599
600 val |= (1UL << 14UL);
601
602 if (bursts & DMA_BURST8)
603 val |= (1UL << 1UL);
604 if (bursts & DMA_BURST16)
605 val |= (1UL << 2UL);
606 if (bursts & DMA_BURST32)
607 val |= (1UL << 3UL);
608 if (bursts & DMA_BURST64)
609 val |= (1UL << 4UL);
610 upa_writeq(val, cfg_reg);
611}
612
1da177e4
LT
613/* INO number to IMAP register offset for SYSIO external IRQ's.
614 * This should conform to both Sunfire/Wildfire server and Fusion
615 * desktop designs.
616 */
617#define SYSIO_IMAP_SLOT0 0x2c04UL
618#define SYSIO_IMAP_SLOT1 0x2c0cUL
619#define SYSIO_IMAP_SLOT2 0x2c14UL
620#define SYSIO_IMAP_SLOT3 0x2c1cUL
621#define SYSIO_IMAP_SCSI 0x3004UL
622#define SYSIO_IMAP_ETH 0x300cUL
623#define SYSIO_IMAP_BPP 0x3014UL
624#define SYSIO_IMAP_AUDIO 0x301cUL
625#define SYSIO_IMAP_PFAIL 0x3024UL
626#define SYSIO_IMAP_KMS 0x302cUL
627#define SYSIO_IMAP_FLPY 0x3034UL
628#define SYSIO_IMAP_SHW 0x303cUL
629#define SYSIO_IMAP_KBD 0x3044UL
630#define SYSIO_IMAP_MS 0x304cUL
631#define SYSIO_IMAP_SER 0x3054UL
632#define SYSIO_IMAP_TIM0 0x3064UL
633#define SYSIO_IMAP_TIM1 0x306cUL
634#define SYSIO_IMAP_UE 0x3074UL
635#define SYSIO_IMAP_CE 0x307cUL
636#define SYSIO_IMAP_SBERR 0x3084UL
637#define SYSIO_IMAP_PMGMT 0x308cUL
638#define SYSIO_IMAP_GFX 0x3094UL
639#define SYSIO_IMAP_EUPA 0x309cUL
640
641#define bogon ((unsigned long) -1)
642static unsigned long sysio_irq_offsets[] = {
643 /* SBUS Slot 0 --> 3, level 1 --> 7 */
644 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
645 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
646 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
647 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
648 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
649 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
650 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
651 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
652
653 /* Onboard devices (not relevant/used on SunFire). */
654 SYSIO_IMAP_SCSI,
655 SYSIO_IMAP_ETH,
656 SYSIO_IMAP_BPP,
657 bogon,
658 SYSIO_IMAP_AUDIO,
659 SYSIO_IMAP_PFAIL,
660 bogon,
661 bogon,
662 SYSIO_IMAP_KMS,
663 SYSIO_IMAP_FLPY,
664 SYSIO_IMAP_SHW,
665 SYSIO_IMAP_KBD,
666 SYSIO_IMAP_MS,
667 SYSIO_IMAP_SER,
668 bogon,
669 bogon,
670 SYSIO_IMAP_TIM0,
671 SYSIO_IMAP_TIM1,
672 bogon,
673 bogon,
674 SYSIO_IMAP_UE,
675 SYSIO_IMAP_CE,
676 SYSIO_IMAP_SBERR,
677 SYSIO_IMAP_PMGMT,
678};
679
680#undef bogon
681
84c1a13a 682#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
1da177e4
LT
683
684/* Convert Interrupt Mapping register pointer to associated
685 * Interrupt Clear register pointer, SYSIO specific version.
686 */
687#define SYSIO_ICLR_UNUSED0 0x3400UL
688#define SYSIO_ICLR_SLOT0 0x340cUL
689#define SYSIO_ICLR_SLOT1 0x344cUL
690#define SYSIO_ICLR_SLOT2 0x348cUL
691#define SYSIO_ICLR_SLOT3 0x34ccUL
692static unsigned long sysio_imap_to_iclr(unsigned long imap)
693{
694 unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
695 return imap + diff;
696}
697
698unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
699{
700 struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
701 struct sbus_iommu *iommu = sbus->iommu;
702 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
703 unsigned long imap, iclr;
37cdcd9e 704 int sbus_level = 0;
1da177e4
LT
705
706 imap = sysio_irq_offsets[ino];
707 if (imap == ((unsigned long)-1)) {
37cdcd9e
DM
708 prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
709 ino);
1da177e4
LT
710 prom_halt();
711 }
712 imap += reg_base;
713
714 /* SYSIO inconsistency. For external SLOTS, we have to select
715 * the right ICLR register based upon the lower SBUS irq level
716 * bits.
717 */
718 if (ino >= 0x20) {
719 iclr = sysio_imap_to_iclr(imap);
720 } else {
721 int sbus_slot = (ino & 0x18)>>3;
722
723 sbus_level = ino & 0x7;
724
725 switch(sbus_slot) {
726 case 0:
727 iclr = reg_base + SYSIO_ICLR_SLOT0;
728 break;
729 case 1:
730 iclr = reg_base + SYSIO_ICLR_SLOT1;
731 break;
732 case 2:
733 iclr = reg_base + SYSIO_ICLR_SLOT2;
734 break;
735 default:
736 case 3:
737 iclr = reg_base + SYSIO_ICLR_SLOT3;
738 break;
739 };
740
741 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
742 }
e18e2a00 743 return build_irq(sbus_level, iclr, imap);
1da177e4
LT
744}
745
746/* Error interrupt handling. */
747#define SYSIO_UE_AFSR 0x0030UL
748#define SYSIO_UE_AFAR 0x0038UL
749#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
750#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
751#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
752#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
753#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
754#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
755#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
756#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
757#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
758#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
759#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
6d24c8dc 760static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
1da177e4
LT
761{
762 struct sbus_bus *sbus = dev_id;
763 struct sbus_iommu *iommu = sbus->iommu;
764 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
765 unsigned long afsr_reg, afar_reg;
766 unsigned long afsr, afar, error_bits;
767 int reported;
768
769 afsr_reg = reg_base + SYSIO_UE_AFSR;
770 afar_reg = reg_base + SYSIO_UE_AFAR;
771
772 /* Latch error status. */
773 afsr = upa_readq(afsr_reg);
774 afar = upa_readq(afar_reg);
775
776 /* Clear primary/secondary error status bits. */
777 error_bits = afsr &
778 (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
779 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
780 upa_writeq(error_bits, afsr_reg);
781
782 /* Log the error. */
783 printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
784 sbus->portid,
785 (((error_bits & SYSIO_UEAFSR_PPIO) ?
786 "PIO" :
787 ((error_bits & SYSIO_UEAFSR_PDRD) ?
788 "DVMA Read" :
789 ((error_bits & SYSIO_UEAFSR_PDWR) ?
790 "DVMA Write" : "???")))));
791 printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
792 sbus->portid,
793 (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
794 (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
795 (afsr & SYSIO_UEAFSR_MID) >> 37UL);
796 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
797 printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
798 reported = 0;
799 if (afsr & SYSIO_UEAFSR_SPIO) {
800 reported++;
801 printk("(PIO)");
802 }
803 if (afsr & SYSIO_UEAFSR_SDRD) {
804 reported++;
805 printk("(DVMA Read)");
806 }
807 if (afsr & SYSIO_UEAFSR_SDWR) {
808 reported++;
809 printk("(DVMA Write)");
810 }
811 if (!reported)
812 printk("(none)");
813 printk("]\n");
814
815 return IRQ_HANDLED;
816}
817
818#define SYSIO_CE_AFSR 0x0040UL
819#define SYSIO_CE_AFAR 0x0048UL
820#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
821#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
822#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
823#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
824#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
825#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
826#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
827#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
828#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
829#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
830#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
831#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
6d24c8dc 832static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
1da177e4
LT
833{
834 struct sbus_bus *sbus = dev_id;
835 struct sbus_iommu *iommu = sbus->iommu;
836 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
837 unsigned long afsr_reg, afar_reg;
838 unsigned long afsr, afar, error_bits;
839 int reported;
840
841 afsr_reg = reg_base + SYSIO_CE_AFSR;
842 afar_reg = reg_base + SYSIO_CE_AFAR;
843
844 /* Latch error status. */
845 afsr = upa_readq(afsr_reg);
846 afar = upa_readq(afar_reg);
847
848 /* Clear primary/secondary error status bits. */
849 error_bits = afsr &
850 (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
851 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
852 upa_writeq(error_bits, afsr_reg);
853
854 printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
855 sbus->portid,
856 (((error_bits & SYSIO_CEAFSR_PPIO) ?
857 "PIO" :
858 ((error_bits & SYSIO_CEAFSR_PDRD) ?
859 "DVMA Read" :
860 ((error_bits & SYSIO_CEAFSR_PDWR) ?
861 "DVMA Write" : "???")))));
862
863 /* XXX Use syndrome and afar to print out module string just like
864 * XXX UDB CE trap handler does... -DaveM
865 */
866 printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
867 sbus->portid,
868 (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
869 (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
870 (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
871 (afsr & SYSIO_CEAFSR_MID) >> 37UL);
872 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
873
874 printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
875 reported = 0;
876 if (afsr & SYSIO_CEAFSR_SPIO) {
877 reported++;
878 printk("(PIO)");
879 }
880 if (afsr & SYSIO_CEAFSR_SDRD) {
881 reported++;
882 printk("(DVMA Read)");
883 }
884 if (afsr & SYSIO_CEAFSR_SDWR) {
885 reported++;
886 printk("(DVMA Write)");
887 }
888 if (!reported)
889 printk("(none)");
890 printk("]\n");
891
892 return IRQ_HANDLED;
893}
894
895#define SYSIO_SBUS_AFSR 0x2010UL
896#define SYSIO_SBUS_AFAR 0x2018UL
897#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
898#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
899#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
900#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
901#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
902#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
903#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
904#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
905#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
906#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
907#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
908#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
6d24c8dc 909static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
1da177e4
LT
910{
911 struct sbus_bus *sbus = dev_id;
912 struct sbus_iommu *iommu = sbus->iommu;
913 unsigned long afsr_reg, afar_reg, reg_base;
914 unsigned long afsr, afar, error_bits;
915 int reported;
916
917 reg_base = iommu->sbus_control_reg - 0x2000UL;
918 afsr_reg = reg_base + SYSIO_SBUS_AFSR;
919 afar_reg = reg_base + SYSIO_SBUS_AFAR;
920
921 afsr = upa_readq(afsr_reg);
922 afar = upa_readq(afar_reg);
923
924 /* Clear primary/secondary error status bits. */
925 error_bits = afsr &
926 (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
927 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
928 upa_writeq(error_bits, afsr_reg);
929
930 /* Log the error. */
931 printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
932 sbus->portid,
933 (((error_bits & SYSIO_SBAFSR_PLE) ?
934 "Late PIO Error" :
935 ((error_bits & SYSIO_SBAFSR_PTO) ?
936 "Time Out" :
937 ((error_bits & SYSIO_SBAFSR_PBERR) ?
938 "Error Ack" : "???")))),
939 (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
940 printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
941 sbus->portid,
942 (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
943 (afsr & SYSIO_SBAFSR_MID) >> 37UL);
944 printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
945 printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
946 reported = 0;
947 if (afsr & SYSIO_SBAFSR_SLE) {
948 reported++;
949 printk("(Late PIO Error)");
950 }
951 if (afsr & SYSIO_SBAFSR_STO) {
952 reported++;
953 printk("(Time Out)");
954 }
955 if (afsr & SYSIO_SBAFSR_SBERR) {
956 reported++;
957 printk("(Error Ack)");
958 }
959 if (!reported)
960 printk("(none)");
961 printk("]\n");
962
963 /* XXX check iommu/strbuf for further error status XXX */
964
965 return IRQ_HANDLED;
966}
967
968#define ECC_CONTROL 0x0020UL
969#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
970#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
971#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
972
973#define SYSIO_UE_INO 0x34
974#define SYSIO_CE_INO 0x35
975#define SYSIO_SBUSERR_INO 0x36
976
977static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
978{
979 struct sbus_iommu *iommu = sbus->iommu;
980 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
981 unsigned int irq;
982 u64 control;
983
984 irq = sbus_build_irq(sbus, SYSIO_UE_INO);
985 if (request_irq(irq, sysio_ue_handler,
d356d7f4 986 IRQF_SHARED, "SYSIO UE", sbus) < 0) {
1da177e4
LT
987 prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
988 sbus->portid);
989 prom_halt();
990 }
991
992 irq = sbus_build_irq(sbus, SYSIO_CE_INO);
993 if (request_irq(irq, sysio_ce_handler,
d356d7f4 994 IRQF_SHARED, "SYSIO CE", sbus) < 0) {
1da177e4
LT
995 prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
996 sbus->portid);
997 prom_halt();
998 }
999
1000 irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
1001 if (request_irq(irq, sysio_sbus_error_handler,
d356d7f4 1002 IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
1da177e4
LT
1003 prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
1004 sbus->portid);
1005 prom_halt();
1006 }
1007
1008 /* Now turn the error interrupts on and also enable ECC checking. */
1009 upa_writeq((SYSIO_ECNTRL_ECCEN |
1010 SYSIO_ECNTRL_UEEN |
1011 SYSIO_ECNTRL_CEEN),
1012 reg_base + ECC_CONTROL);
1013
1014 control = upa_readq(iommu->sbus_control_reg);
1015 control |= 0x100UL; /* SBUS Error Interrupt Enable */
1016 upa_writeq(control, iommu->sbus_control_reg);
1017}
1018
1019/* Boot time initialization. */
576c352e 1020static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1da177e4 1021{
6a23acf3 1022 const struct linux_prom64_registers *pr;
25c7581b 1023 struct device_node *dp;
1da177e4 1024 struct sbus_iommu *iommu;
2f3a2efd 1025 unsigned long regs;
1da177e4 1026 u64 control;
25c7581b
DM
1027 int i;
1028
1029 dp = of_find_node_by_phandle(__node);
1da177e4 1030
25c7581b 1031 sbus->portid = of_getintprop_default(dp, "upa-portid", -1);
1da177e4 1032
25c7581b
DM
1033 pr = of_get_property(dp, "reg", NULL);
1034 if (!pr) {
1da177e4
LT
1035 prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
1036 prom_halt();
1037 }
25c7581b 1038 regs = pr->phys_addr;
1da177e4
LT
1039
1040 iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
1041 if (iommu == NULL) {
1042 prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
1043 prom_halt();
1044 }
1045
1046 /* Align on E$ line boundary. */
1047 iommu = (struct sbus_iommu *)
1048 (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
1049 ~(SMP_CACHE_BYTES - 1UL));
1050
1051 memset(iommu, 0, sizeof(*iommu));
1052
1da177e4
LT
1053 /* Setup spinlock. */
1054 spin_lock_init(&iommu->lock);
1055
1056 /* Init register offsets. */
1057 iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
1058 iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
1059
1060 /* The SYSIO SBUS control register is used for dummy reads
1061 * in order to ensure write completion.
1062 */
1063 iommu->sbus_control_reg = regs + 0x2000UL;
1064
1065 /* Link into SYSIO software state. */
1066 sbus->iommu = iommu;
1067
1068 printk("SYSIO: UPA portID %x, at %016lx\n",
1069 sbus->portid, regs);
1070
1071 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
2f3a2efd
DM
1072 sbus_iommu_table_init(iommu, IO_TSB_SIZE);
1073
1da177e4
LT
1074 control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
1075 control = ((7UL << 16UL) |
1076 (0UL << 2UL) |
1077 (1UL << 1UL) |
1078 (1UL << 0UL));
1da177e4
LT
1079 upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
1080
1081 /* Clean out any cruft in the IOMMU using
1082 * diagnostic accesses.
1083 */
1084 for (i = 0; i < 16; i++) {
1085 unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
1086 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
1087
1088 dram += (unsigned long)i * 8UL;
1089 tag += (unsigned long)i * 8UL;
1090 upa_writeq(0, dram);
1091 upa_writeq(0, tag);
1092 }
1093 upa_readq(iommu->sbus_control_reg);
1094
1095 /* Give the TSB to SYSIO. */
2f3a2efd 1096 upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);
1da177e4
LT
1097
1098 /* Setup streaming buffer, DE=1 SB_EN=1 */
1099 control = (1UL << 1UL) | (1UL << 0UL);
1100 upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
1101
1102 /* Clear out the tags using diagnostics. */
1103 for (i = 0; i < 16; i++) {
1104 unsigned long ptag, ltag;
1105
1106 ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
1107 ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
1108 ptag += (unsigned long)i * 8UL;
1109 ltag += (unsigned long)i * 8UL;
1110
1111 upa_writeq(0UL, ptag);
1112 upa_writeq(0UL, ltag);
1113 }
1114
1115 /* Enable DVMA arbitration for all devices/slots. */
1116 control = upa_readq(iommu->sbus_control_reg);
1117 control |= 0x3fUL;
1118 upa_writeq(control, iommu->sbus_control_reg);
1119
1120 /* Now some Xfire specific grot... */
1121 if (this_is_starfire)
286bbe87 1122 starfire_hookup(sbus->portid);
1da177e4
LT
1123
1124 sysio_register_error_handlers(sbus);
1125}
8fae097d
DM
1126
1127void sbus_fill_device_irq(struct sbus_dev *sdev)
1128{
25c7581b 1129 struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
6a23acf3 1130 const struct linux_prom_irqs *irqs;
8fae097d 1131
25c7581b
DM
1132 irqs = of_get_property(dp, "interrupts", NULL);
1133 if (!irqs) {
8fae097d
DM
1134 sdev->irqs[0] = 0;
1135 sdev->num_irqs = 0;
1136 } else {
1137 unsigned int pri = irqs[0].pri;
1138
1139 sdev->num_irqs = 1;
1140 if (pri < 0x20)
1141 pri += sdev->slot * 8;
1142
1143 sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
1144 }
1145}
576c352e
DM
1146
1147void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
1148{
1149}
1150
1151void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
1152{
1153 sbus_iommu_init(dp->node, sbus);
1154}
1155
1156void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
1157{
1158}
1159
1160int __init sbus_arch_preinit(void)
1161{
1162 return 0;
1163}
1164
1165void __init sbus_arch_postinit(void)
1166{
1167 extern void firetruck_init(void);
576c352e
DM
1168
1169 firetruck_init();
576c352e 1170}