/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>

#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif

#define IOMMU_LARGE_ALLOC	15

/*
 * Initialize iommu_pool entries for the iommu_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than IOMMU_LARGE_ALLOC pages.
 */
void iommu_tbl_pool_init(struct iommu_table *iommu,
			 unsigned long num_entries,
			 u32 page_table_shift,
			 const struct iommu_tbl_ops *iommu_tbl_ops,
			 bool large_pool, u32 npools)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->page_table_shift = page_table_shift;
	iommu->iommu_tbl_ops = iommu_tbl_ops;
	start = 0;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
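	/*
	 * Worked example (illustrative numbers, not taken from any caller):
	 * with num_entries == 4096, nr_pools == 16 and a large pool, the
	 * lower 3072 entries are split into 16 small pools of 192 entries
	 * each and the top 1024 entries become the large pool.  Without a
	 * large pool, each of the 16 pools would cover 256 entries.
	 */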
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->arena_pool[i].lock));
		iommu->arena_pool[i].start = start;
		iommu->arena_pool[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->arena_pool[i].end = start - 1;
	}
	if (!large_pool)
		return;
	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
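
/*
 * Illustrative use (a hypothetical caller; my_iommu, my_map, my_ops and the
 * sizes below are assumptions for this sketch, not names defined in this
 * file): a driver with a 4096-entry translation table and 8K IO pages
 * (page_table_shift == 13) could set things up along the lines of
 *
 *	my_iommu.page_table_map_base = base_dma_cookie;  (cookie of entry 0)
 *	my_iommu.map = my_map;                           (one bit per entry)
 *	iommu_tbl_pool_init(&my_iommu, 4096, 13, &my_ops, true, 0);
 *
 * Passing npools == 0 selects the default IOMMU_NR_POOLS; large_pool == true
 * reserves the top quarter of the table for large allocations.
 */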

unsigned long iommu_tbl_range_alloc(struct device *dev,
				    struct iommu_table *iommu,
				    unsigned long npages,
				    unsigned long *handle,
				    unsigned int pool_hash)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *arena;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
	unsigned long shift;

	/* Sanity check */
	if (unlikely(npages == 0)) {
		printk_ratelimited("npages == 0\n");
		return DMA_ERROR_CODE;
	}

	if (largealloc) {
		arena = &(iommu->large_pool);
		spin_lock_irqsave(&arena->lock, flags);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		arena = &(iommu->arena_pool[pool_nr]);

		/* find first available unlocked pool */
		while (!spin_trylock_irqsave(&(arena->lock), flags)) {
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
		}
	}
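	/*
	 * Note on pool_hash (descriptive comment, not from this file): it
	 * only seeds which pool the search starts in, so callers typically
	 * pass a value that differs between CPUs (e.g. one derived from the
	 * executing CPU) so that concurrent allocations tend to hit
	 * different pool locks.  Any value is correct; a poor choice only
	 * increases lock contention.
	 */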

 again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= arena->start) && (*handle < arena->end))
		start = *handle;
	else
		start = arena->hint;

	limit = arena->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = arena->start;
		if (iommu->iommu_tbl_ops->reset != NULL)
			iommu->iommu_tbl_ops->reset(iommu);
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->page_table_shift);
	else
		boundary_size = ALIGN(1ULL << 32, 1 << iommu->page_table_shift);

	shift = iommu->page_table_map_base >> iommu->page_table_shift;
	boundary_size = boundary_size >> iommu->page_table_shift;
	/*
	 * if the iommu has a non-trivial cookie <-> index mapping, we set
	 * things up so that iommu_is_span_boundary() merely checks if the
	 * (index + npages) < num_tsb_entries
	 */
	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
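	/*
	 * Illustrative arithmetic (example numbers): with the default 4GB
	 * segment boundary and page_table_shift == 13 (8K IO pages),
	 * boundary_size ends up as (1ULL << 32) >> 13 == 0x80000 entries,
	 * i.e. the allocation below may not cross a 0x80000-entry boundary
	 * in the map.
	 */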
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, 0);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			arena->hint = arena->start;
			if (iommu->iommu_tbl_ops->reset != NULL)
				iommu->iommu_tbl_ops->reset(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(arena->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
			while (!spin_trylock(&(arena->lock))) {
				pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
				arena = &(iommu->arena_pool[pool_nr]);
			}
			arena->hint = arena->start;
			pass++;
			goto again;
		} else {
			/* give up */
			spin_unlock_irqrestore(&(arena->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
	spin_unlock_irqrestore(&(arena->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
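
/*
 * Illustrative use (hypothetical caller; my_iommu, my_pool_hash and dev are
 * assumptions for this sketch): allocate npages contiguous table entries and
 * turn the returned index back into a DMA cookie.  The cookie arithmetic
 * shown assumes the trivial cookie <-> index mapping, i.e. no
 * cookie_to_index op is installed.
 *
 *	entry = iommu_tbl_range_alloc(dev, &my_iommu, npages, &handle,
 *				      my_pool_hash);
 *	if (entry == DMA_ERROR_CODE)
 *		return DMA_ERROR_CODE;              (allocation failed)
 *	dma_addr = my_iommu.page_table_map_base +
 *		   (entry << my_iommu.page_table_shift);
 *
 * The handle, when passed, lets a scatterlist caller continue a subsequent
 * allocation from where the previous one ended.
 */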

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->arena_pool[pool_nr];
	}
	return p;
}
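
/*
 * Worked example for get_pool() (illustrative numbers, matching the sizing
 * example above): with poolsize == 192 and the large pool starting at entry
 * 3072, entry 500 maps to arena_pool[500 / 192] == arena_pool[2], while
 * entry 3100 is handed back as the large pool.
 */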

void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
			  unsigned long npages, bool do_demap, void *demap_arg)
{
	unsigned long entry;
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->page_table_shift;

	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
								 demap_arg);
	} else {
		entry = (dma_addr - iommu->page_table_map_base) >> shift;
	}
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
		(*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);

	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
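
/*
 * Illustrative use (hypothetical caller, pairing with the allocation sketch
 * above): release the same range and, when a demap op is installed, have it
 * tear down the hardware mappings before the bitmap bits are cleared.
 *
 *	iommu_tbl_range_free(&my_iommu, dma_addr, npages, true, demap_arg);
 *
 * With do_demap == false the demap op is skipped and only the allocator
 * state is updated; the caller then handles any unmapping itself.
 */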