/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6	/* 4096 * (1<<6) */

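/*
 * A quick check of the sizes above: the 256MB window divided into 4KB
 * pages gives 65536 (64K) PTEs; at 4 bytes per iopte that is a 256KB
 * table, which matches IOMMU_ORDER 6 (4096 << 6 == 256KB).
 */
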
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;	/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;	/* Consistent mapping pte flags */

#define IOPERM	(IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
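/*
 * MKIOPTE builds a single iopte: the page frame number is shifted into
 * the IOPTE_PAGE field, the permission bits are or-ed in, and the
 * IOPTE_WAZ bits (write-as-zero, going by the name) are cleared.
 */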
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static void __init sbus_iommu_init(struct platform_device *op)
{
        struct iommu_struct *iommu;
        unsigned int impl, vers;
        unsigned long *bitmap;
        unsigned long tmp;

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
        if (!iommu) {
                prom_printf("Unable to allocate iommu structure\n");
                prom_halt();
        }

        iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
                                 "iommu_regs");
        if (!iommu->regs) {
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
        impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
        vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
        tmp = iommu->regs->control;
        tmp &= ~(IOMMU_CTRL_RNGE);
        tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
        iommu->regs->control = tmp;
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;

        /* Allocate IOMMU page table */
        /* Stupid alignment constraints give me a headache.
           We need a 256K, 512K, 1M, or 2M area aligned to its size,
           and the current gfp implementation fortunately gives it
           to us. */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
                prom_printf("Unable to allocate iommu table [0x%lx]\n",
                            IOMMU_NPTES * sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;

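        /*
         * Why the alignment works out: the page allocator returns blocks
         * naturally aligned to their order, so the order-6 allocation
         * above is a 256KB area aligned to 256KB, i.e. aligned to its
         * own size as the comment above requires.
         */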
        /* Initialize new table. */
        memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
        iommu->regs->base = __pa((unsigned long)iommu->page_table) >> 4;
        iommu_invalidate(iommu->regs);

        bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
        if (!bitmap) {
                prom_printf("Unable to allocate iommu bitmap [%d]\n",
                            (int)(IOMMU_NPTES >> 3));
                prom_halt();
        }
        bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
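        /*
         * A note on the color count below (assuming HyperSparc's virtual
         * cache is direct-mapped): the number of page colors is the cache
         * size divided by the page size, hence vac_cache_size >> PAGE_SHIFT.
         */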
        /* To be coherent on HyperSparc, the page color of DVMA
         * and physical addresses must match.
         */
        if (srmmu_modtype == HyperSparc)
                iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
        else
                iommu->usemap.num_colors = 1;

        printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
               impl, vers, iommu->page_table,
               (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

        op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
        struct device_node *dp;

        for_each_node_by_name(dp, "iommu") {
                struct platform_device *op = of_find_device_by_node(dp);

                sbus_iommu_init(op);
                of_propagate_archdata(op);
        }

        return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)iopte;
        end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
        start &= PAGE_MASK;
        if (viking_mxcc_present) {
                while (start < end) {
                        viking_mxcc_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else if (viking_flush) {
                while (start < end) {
                        viking_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else {
                while (start < end) {
                        __flush_page_to_ram(start);
                        start += PAGE_SIZE;
                }
        }
}

static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
        struct iommu_struct *iommu = dev->archdata.iommu;
        int ioptex;
        iopte_t *iopte, *iopte0;
        unsigned int busa, busa0;
        int i;

        /* page color = pfn of page */
        ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
        iopte0 = &iommu->page_table[ioptex];

        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
                iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
                page++;
        }

        iommu_flush_iotlb(iopte0, npages);

        return busa0;
}

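/*
 * Worked example of the bus-address arithmetic in iommu_get_one(): with
 * IOMMU_START 0xF0000000 and 4KB pages, ioptex 5 yields
 * busa0 = 0xF0000000 + (5 << 12) = 0xF0005000.
 */
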
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
        unsigned long off;
        int npages;
        struct page *page;
        u32 busa;

        off = (unsigned long)vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
        busa = iommu_get_one(dev, page, npages);
        return busa + off;
}

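/*
 * Worked example of the npages rounding above: off 0x800 and len 0x1000
 * give (0x800 + 0x1000 + 0xfff) >> 12 = 2, i.e. a one-page-long buffer
 * that straddles a page boundary needs two IOMMU entries.
 */
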
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
        flush_page_for_dma(0);
        return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
        unsigned long page = ((unsigned long)vaddr) & PAGE_MASK;

        while (page < ((unsigned long)(vaddr + len))) {
                flush_page_for_dma(page);
                page += PAGE_SIZE;
        }
        return iommu_get_scsi_one(dev, vaddr, len);
}

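/*
 * The _gflush variants rely on flush_page_for_dma(0) flushing the whole
 * cache in one operation, while the _pflush variants walk the buffer and
 * flush it page by page; ld_mmu_iommu() selects between the two based on
 * flush_page_for_dma_global.
 */
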
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
        int n;

        flush_page_for_dma(0);
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
                sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
        unsigned long page, oldpage = 0;
        int n, i;

        while (sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

                /*
                 * We expect unmapped highmem pages not to be in the cache.
                 * XXX Is this a good assumption?
                 * XXX What if someone else unmaps it here and races us?
                 */
                if ((page = (unsigned long)page_address(sg_page(sg))) != 0) {
                        for (i = 0; i < n; i++) {
                                if (page != oldpage) {  /* Already flushed? */
                                        flush_page_for_dma(page);
                                        oldpage = page;
                                }
                                page += PAGE_SIZE;
                        }
                }

                sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
                sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
}

static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
        struct iommu_struct *iommu = dev->archdata.iommu;
        int ioptex;
        int i;

        BUG_ON(busa < iommu->start);
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
        }
        bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
        unsigned long off;
        int npages;

        off = vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
        int n;

        while (sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
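                /* 0x21 is ASCII '!': presumably a poison pattern so that
                 * any use of the handle after release stands out. */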
                sg->dma_address = 0x21212121;
                sg = sg_next(sg);
        }
}

#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
                              unsigned long addr, int len)
{
        struct iommu_struct *iommu = dev->archdata.iommu;
        unsigned long page, end;
        iopte_t *iopte = iommu->page_table;
        iopte_t *first;
        int ioptex;

        BUG_ON((va & ~PAGE_MASK) != 0);
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
                                    addr >> PAGE_SHIFT);
        if (ioptex < 0)
                panic("iommu out");

        iopte += ioptex;
        first = iopte;
        end = addr + len;
        while (addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (viking_mxcc_present)
                                viking_mxcc_flush_page(page);
                        else if (viking_flush)
                                viking_flush_page(page);
                        else
                                __flush_page_to_ram(page);

                        pgdp = pgd_offset(&init_mm, addr);
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                }
                iopte_val(*iopte++) =
                        MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        /* P3: why do we need this?
         *
         * DAVEM: Because there are several aspects, none of which
         * are handled by a single interface. Some cpus are
         * completely not I/O DMA coherent, and some have
         * virtually indexed caches. The driver DMA flushing
         * methods handle the former case, but here during
         * IOMMU page table modifications, and usage of non-cacheable
         * cpu mappings of pages potentially in the cpu caches, we have
         * to handle the latter case as well.
         */
        flush_cache_all();
        iommu_flush_iotlb(first, len >> PAGE_SHIFT);
        flush_tlb_all();
        iommu_invalidate(iommu->regs);

        *pba = iommu->start + (ioptex << PAGE_SHIFT);
        return 0;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
        struct iommu_struct *iommu = dev->archdata.iommu;
        iopte_t *iopte = iommu->page_table;
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

        BUG_ON((busa & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        iopte += ioptex;
        end = busa + len;
        while (busa < end) {
                iopte_val(*iopte++) = 0;
                busa += PAGE_SIZE;
        }
        flush_tlb_all();
        iommu_invalidate(iommu->regs);
        bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
        .get_scsi_one = iommu_get_scsi_one_gflush,
        .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
        .release_scsi_one = iommu_release_scsi_one,
        .release_scsi_sgl = iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
        .map_dma_area = iommu_map_dma_area,
        .unmap_dma_area = iommu_unmap_dma_area,
#endif
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
        .get_scsi_one = iommu_get_scsi_one_pflush,
        .get_scsi_sgl = iommu_get_scsi_sgl_pflush,
        .release_scsi_one = iommu_release_scsi_one,
        .release_scsi_sgl = iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
        .map_dma_area = iommu_map_dma_area,
        .unmap_dma_area = iommu_unmap_dma_area,
#endif
};

void __init ld_mmu_iommu(void)
{
        if (flush_page_for_dma_global) {
                /* flush_page_for_dma flushes everything, no matter what page it is */
                sparc32_dma_ops = &iommu_dma_gflush_ops;
        } else {
                sparc32_dma_ops = &iommu_dma_pflush_ops;
        }

        if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
                dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
        } else {
                dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
        }
}