/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do so
 * only once we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

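/*
 * A quick check of the sizing above, assuming the usual sparc32
 * 4 KB PAGE_SIZE and a 4-byte iopte_t:
 *
 *   IOMMU_NPTES = 256 MB / 4 KB            = 65536 PTEs
 *   table size  = 65536 * sizeof(iopte_t)  = 256 KB
 *   table pages = 256 KB / 4 KB            = 64   = 1 << IOMMU_ORDER
 */
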
/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

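/*
 * How MKIOPTE packs its arguments, sketched with hypothetical values:
 * the pfn is shifted left by 8 so the IOPTE's page field holds
 * paddr >> 4 (pfn << 12 >> 4 for 4 KB pages, matching the ">> 4"
 * used when programming the base register in iommu_init() below),
 * masked to IOPTE_PAGE, OR'd with the permission bits, and stripped
 * of the write-as-zero (WAZ) bits.  For example:
 *
 *   MKIOPTE(0x12345, IOPERM)
 *     -> ((0x1234500 & IOPTE_PAGE)
 *         | IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID) & ~IOPTE_WAZ
 */
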
void __init iommu_init(struct device_node *parent, struct sbus_bus *sbus)
{
	struct of_device *parent_op, *op;
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	parent_op = of_find_device_by_node(parent);
	if (!parent_op) {
		prom_printf("Unable to find IOMMU of_device\n");
		prom_halt();
	}

	op = of_find_device_by_node(sbus->ofdev.node);
	if (!op) {
		prom_printf("Unable to find SBUS of_device\n");
		prom_halt();
	}

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&parent_op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
	 * and the current gfp allocator fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->ofdev.dev.archdata.iommu = iommu;
	op->dev.archdata.iommu = iommu;
}

/*
 * This begs to be btfixup-ed by srmmu.
 * Flush the iotlb entries to ram.
 * This could be better if we didn't have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

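/*
 * Note on the rounding above: the IOPTEs live in ordinary kernel
 * memory, so the span [iopte, iopte + niopte) is widened to whole
 * pages (PAGE_MASK / PAGE_ALIGN) because all three flush primitives
 * operate on page-sized chunks, as the comment above laments.
 */
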
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

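/*
 * Worked example for iommu_get_scsi_one() above, with hypothetical
 * numbers and 4 KB pages: for a vaddr ending in ...234 and
 * len = 0x2000, off = 0x234 and
 * npages = (0x234 + 0x2000 + 0xfff) >> 12 = 3, so three consecutive
 * IOPTEs are set up and the caller gets busa0 + 0x234 back.
 */
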
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

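/*
 * Example for iommu_release_one() above (hypothetical numbers, 4 KB
 * pages): busa = 0xF0003000 with iommu->start = IOMMU_START =
 * 0xF0000000 gives ioptex = 3; the npages IOPTEs starting at
 * page_table[3] are cleared, each bus page is invalidated in the
 * IOTLB, and the corresponding bitmap range is returned to the
 * allocator.
 */
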
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
		sg->dvma_address = 0x21212121; /* poison ("!!!!") to catch stale use */
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
				    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

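/*
 * Sketch of iommu_map_dma_area() above, with hypothetical numbers and
 * 4 KB pages: for len = 0x4000 (four pages) and a free ioptex of 16,
 * four IOPTEs at page_table[16..19] are filled with ioperm_noc
 * mappings, the kernel PTEs covering va..va+len are switched to
 * dvma_prot, and *pba becomes IOMMU_START + (16 << 12) = 0xF0010000.
 */
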
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}