/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This could be sized dynamically, but we will do that
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE      IOMMU_RNGE_256MB
#define IOMMU_START     0xF0000000
#define IOMMU_WINSIZE   (256*1024*1024U)
#define IOMMU_NPTES     (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
#define IOMMU_ORDER     6                               /* 4096 * (1<<6) */
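
/*
 * A worked check of the numbers above, assuming the usual 4-byte
 * iopte_t: the 256MB DVMA window covers IOMMU_WINSIZE / PAGE_SIZE = 64K
 * page-table entries, so the table occupies 256KB. That is exactly a
 * __get_free_pages() allocation of order 6 (4096 << 6 == 256KB), which
 * is also naturally aligned to its own size, as the allocation comment
 * in iommu_init() below requires.
 */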

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;         /* Consistent mapping iopte flags */
static pgprot_t dvma_prot;              /* Consistent mapping pte flags */

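/*
 * Note on MKIOPTE below: the IOPTE page field holds the physical
 * address shifted right by 4, so with 4KB pages pfn << 8 equals
 * (pfn << PAGE_SHIFT) >> 4; compare the iommu->regs->base setup and
 * iommu_translate_dvma(), which apply and undo the same shift. For
 * example, pfn 0x12345 yields a page field of 0x1234500 before the
 * permission bits are ORed in and the write-as-zero (IOPTE_WAZ) bits
 * are masked off.
 */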
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

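/*
 * Boot-time setup for one SBus IOMMU: map the control registers from
 * the PROM "reg" property, enable a 256MB DVMA range, allocate the
 * naturally aligned page table, and initialize the allocation bitmap.
 */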
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
        unsigned int impl, vers;
        unsigned long tmp;
        struct iommu_struct *iommu;
        struct linux_prom_registers iommu_promregs[PROMREG_MAX];
        struct resource r;
        unsigned long *bitmap;

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("Unable to allocate iommu structure\n");
                prom_halt();
        }
        iommu->regs = NULL;
        if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
                             sizeof(iommu_promregs)) != -1) {
                memset(&r, 0, sizeof(r));
                r.flags = iommu_promregs[0].which_io;
                r.start = iommu_promregs[0].phys_addr;
                iommu->regs = (struct iommu_regs *)
                        sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
        }
        if (!iommu->regs) {
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
        impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
        vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
        tmp = iommu->regs->control;
        tmp &= ~(IOMMU_CTRL_RNGE);
        tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
        iommu->regs->control = tmp;
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;

        /* Allocate IOMMU page table */
        /* Stupid alignment constraints give me a headache.
           We need a 256K or 512K or 1M or 2M area aligned to
           its size, and the current gfp will fortunately give
           it to us. */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
                prom_printf("Unable to allocate iommu table [0x%08x]\n",
                            IOMMU_NPTES*sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;

        /* Initialize new table. */
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
        iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
        iommu_invalidate(iommu->regs);

        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
        if (!bitmap) {
                prom_printf("Unable to allocate iommu bitmap [%d]\n",
                            (int)(IOMMU_NPTES>>3));
                prom_halt();
        }
        bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
        /* To be coherent on HyperSparc, the page color of DVMA
         * and physical addresses must match.
         */
        if (srmmu_modtype == HyperSparc)
                iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
        else
                iommu->usemap.num_colors = 1;

        printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
               impl, vers, iommu->page_table,
               (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

        sbus->iommu = iommu;
}

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)iopte & PAGE_MASK;
        end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
        if (viking_mxcc_present) {
                while(start < end) {
                        viking_mxcc_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else if (viking_flush) {
                while(start < end) {
                        viking_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else {
                while(start < end) {
                        __flush_page_to_ram(start);
                        start += PAGE_SIZE;
                }
        }
}

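/*
 * Map npages consecutive struct pages into DVMA space. The starting
 * IOPTE index is chosen so that its color matches the page's pfn,
 * which keeps DVMA and physical addresses coherent on virtually
 * indexed caches (see the HyperSparc note in iommu_init()). Returns
 * the base bus address of the new mapping.
 */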
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        int ioptex;
        iopte_t *iopte, *iopte0;
        unsigned int busa, busa0;
        int i;

        /* page color = pfn of page */
        ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
        iopte0 = &iommu->page_table[ioptex];

        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
                iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
                page++;
        }

        iommu_flush_iotlb(iopte0, npages);

        return busa0;
}

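/*
 * Map an arbitrary kernel buffer for DMA: round out to whole pages and
 * add the intra-page offset back into the returned bus address. For
 * example, a 5-byte buffer starting 2 bytes before a page boundary
 * (off == PAGE_SIZE - 2) spans two pages, and the formula below indeed
 * yields npages == 2.
 */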
static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;
        struct page *page;
        u32 busa;

        off = (unsigned long)vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
        busa = iommu_get_one(page, npages, sbus);
        return busa + off;
}

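/*
 * Three flavors of the mapping entry points, one of which is patched
 * in by ld_mmu_iommu() below according to the CPU's DMA coherency:
 * noflush for I/O coherent chips, gflush for chips whose
 * flush_page_for_dma(0) flushes the entire cache in one go, and
 * pflush for chips that must flush each page of the buffer.
 */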
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        flush_page_for_dma(0);
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

        while(page < ((unsigned long)(vaddr + len))) {
                flush_page_for_dma(page);
                page += PAGE_SIZE;
        }
        return iommu_get_scsi_one(vaddr, len, sbus);
}

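/*
 * Scatterlist versions of the above: each segment is mapped
 * independently, with the segment's intra-page offset folded into
 * dvma_address.
 */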
static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        flush_page_for_dma(0);
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        unsigned long page, oldpage = 0;
        int n, i;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

                /*
                 * We expect unmapped highmem pages to be not in the cache.
                 * XXX Is this a good assumption?
                 * XXX What if someone else unmaps it here and races us?
                 */
                if ((page = (unsigned long) page_address(sg->page)) != 0) {
                        for (i = 0; i < n; i++) {
                                if (page != oldpage) {  /* Already flushed? */
                                        flush_page_for_dma(page);
                                        oldpage = page;
                                }
                                page += PAGE_SIZE;
                        }
                }

                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

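/*
 * Undo iommu_get_one(): clear npages IOPTEs starting at bus address
 * busa, invalidate the matching IOTLB entries, and return the range to
 * the allocation bitmap.
 */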
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        int ioptex;
        int i;

        if (busa < iommu->start)
                BUG();
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
        }
        bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;

        off = vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
                sg->dvma_address = 0x21212121;  /* poison: catch use after release */
                sg++;
        }
}

#ifdef CONFIG_SBUS
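/*
 * Set up a consistent ("DVMA") mapping of len bytes: the kernel pages
 * at va are remapped via dvma_prot in init_mm's page tables, and
 * matching IOPTEs are installed with ioperm_noc, which omits
 * IOPTE_CACHE on CPUs that are not I/O coherent (see ld_mmu_iommu()
 * below). va, addr and len must all be page aligned.
 */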
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
        unsigned long page, end;
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;
        iopte_t *first;
        int ioptex;

        if ((va & ~PAGE_MASK) != 0) BUG();
        if ((addr & ~PAGE_MASK) != 0) BUG();
        if ((len & ~PAGE_MASK) != 0) BUG();

        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
            addr >> PAGE_SHIFT);
        if (ioptex < 0)
                panic("iommu out");

        iopte += ioptex;
        first = iopte;
        end = addr + len;
        while(addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (viking_mxcc_present)
                                viking_mxcc_flush_page(page);
                        else if (viking_flush)
                                viking_flush_page(page);
                        else
                                __flush_page_to_ram(page);

                        pgdp = pgd_offset(&init_mm, addr);
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                }
                iopte_val(*iopte++) =
                    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        /* P3: why do we need this?
         *
         * DAVEM: Because there are several aspects, none of which
         * are handled by a single interface. Some cpus are
         * completely not I/O DMA coherent, and some have
         * virtually indexed caches. The driver DMA flushing
         * methods handle the former case, but here, during
         * IOMMU page table modifications and usage of non-cacheable
         * cpu mappings of pages potentially in the cpu caches, we have
         * to handle the latter case as well.
         */
        flush_cache_all();
        iommu_flush_iotlb(first, len >> PAGE_SHIFT);
        flush_tlb_all();
        iommu_invalidate(iommu->regs);

        *pba = iommu->start + (ioptex << PAGE_SHIFT);
        return 0;
}

static void iommu_unmap_dma_area(unsigned long busa, int len)
{
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

        if ((busa & ~PAGE_MASK) != 0) BUG();
        if ((len & ~PAGE_MASK) != 0) BUG();

        iopte += ioptex;
        end = busa + len;
        while (busa < end) {
                iopte_val(*iopte++) = 0;
                busa += PAGE_SIZE;
        }
        flush_tlb_all();
        iommu_invalidate(iommu->regs);
        bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}

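/*
 * Translate a DVMA bus address back to its struct page. The shift by
 * PAGE_SHIFT - 4 (i.e. 8 for 4KB pages) recovers the pfn because the
 * IOPTE page field stores the physical address >> 4, exactly mirroring
 * the pfn << 8 in MKIOPTE().
 */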
static struct page *iommu_translate_dvma(unsigned long busa)
{
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;

        iopte += ((busa - iommu->start) >> PAGE_SHIFT);
        return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif

/* Locking of DMA areas is a no-op on this IOMMU. */
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
        return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

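/*
 * Wire the routines above into the btfixup-patched mmu_* entry points.
 * The flush variant is picked from what srmmu resolved
 * flush_page_for_dma to: a NULL call means the chip is I/O coherent,
 * flush_page_for_dma_global selects the whole-cache flush, and
 * anything else gets the per-page flush.
 */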
void __init ld_mmu_iommu(void)
{
        viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
        BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
        BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

        if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
                /* IO coherent chip */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
        } else if (flush_page_for_dma_global) {
                /* flush_page_for_dma flushes everything, no matter which page it is */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
        } else {
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
        }
        BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
        BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

        if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
                dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
        } else {
                dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
        }
}