/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
/* sun3x has a real IOMMU; the unmap routine lives elsewhere. */
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
/* sun3 needs no per-range IOMMU teardown; stub it out. */
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

/* Per-DVMA-page usage table: 0 = free, otherwise the byte length
 * recorded for the allocation whose base falls on that page. */
unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];

/* Map a DVMA bus address to its index in iommu_use[]. */
#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

/* Usable as an lvalue: the recorded allocation length at baddr. */
#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

/* A free region of DVMA bus-address space; size == end - start. */
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

/* All currently free DVMA regions. */
static struct list_head hole_list;
/* Spare struct hole entries available for reuse. */
static struct list_head hole_cache;
/* Static backing store for the hole cache (no dynamic allocation). */
static struct hole initholes[64];
47 | ||
#ifdef DVMA_DEBUG

/* Debug-only counters tracking allocation/free call and byte totals. */
static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;
55 | static void print_use(void) | |
56 | { | |
57 | ||
58 | int i; | |
59 | int j = 0; | |
60 | ||
61 | printk("dvma entry usage:\n"); | |
62 | ||
63 | for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) { | |
64 | if(!iommu_use[i]) | |
65 | continue; | |
66 | ||
67 | j++; | |
68 | ||
69 | printk("dvma entry: %08lx len %08lx\n", | |
70 | ( i << DVMA_PAGE_SHIFT) + DVMA_START, | |
71 | iommu_use[i]); | |
72 | } | |
73 | ||
74 | printk("%d entries in use total\n", j); | |
75 | ||
76 | printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees); | |
77 | printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes, | |
78 | dvma_free_bytes); | |
79 | } | |
80 | ||
81 | static void print_holes(struct list_head *holes) | |
82 | { | |
83 | ||
84 | struct list_head *cur; | |
85 | struct hole *hole; | |
86 | ||
87 | printk("listing dvma holes\n"); | |
88 | list_for_each(cur, holes) { | |
89 | hole = list_entry(cur, struct hole, list); | |
90 | ||
91 | if((hole->start == 0) && (hole->end == 0) && (hole->size == 0)) | |
92 | continue; | |
93 | ||
94 | printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size); | |
95 | } | |
96 | ||
97 | printk("end of hole listing...\n"); | |
98 | ||
99 | } | |
100 | #endif /* DVMA_DEBUG */ | |
101 | ||
102 | static inline int refill(void) | |
103 | { | |
104 | ||
105 | struct hole *hole; | |
106 | struct hole *prev = NULL; | |
107 | struct list_head *cur; | |
108 | int ret = 0; | |
109 | ||
110 | list_for_each(cur, &hole_list) { | |
111 | hole = list_entry(cur, struct hole, list); | |
112 | ||
113 | if(!prev) { | |
114 | prev = hole; | |
115 | continue; | |
116 | } | |
117 | ||
118 | if(hole->end == prev->start) { | |
119 | hole->size += prev->size; | |
120 | hole->end = prev->end; | |
a7addcea | 121 | list_move(&(prev->list), &hole_cache); |
1da177e4 LT |
122 | ret++; |
123 | } | |
124 | ||
125 | } | |
126 | ||
127 | return ret; | |
128 | } | |
129 | ||
130 | static inline struct hole *rmcache(void) | |
131 | { | |
132 | struct hole *ret; | |
133 | ||
134 | if(list_empty(&hole_cache)) { | |
135 | if(!refill()) { | |
136 | printk("out of dvma hole cache!\n"); | |
137 | BUG(); | |
138 | } | |
139 | } | |
140 | ||
141 | ret = list_entry(hole_cache.next, struct hole, list); | |
142 | list_del(&(ret->list)); | |
143 | ||
144 | return ret; | |
145 | ||
146 | } | |
147 | ||
/*
 * Carve a DVMA bus-address range of 'len' bytes out of the hole list
 * and return its base address.  Alignments up to DVMA_PAGE_SIZE are
 * implicit because holes shrink in whole pages; larger alignments pad
 * the request.  Panics when no hole is big enough.
 */
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		printk("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if(align > DVMA_PAGE_SIZE)
			/* pad so the carved-out base (hole->end - newlen)
			 * lands on an 'align' boundary; assumes align is a
			 * power of two -- NOTE(review): confirm callers */
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			/* shrink the hole from its top end */
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			/* exact fit: the hole is consumed entirely, so
			 * recycle its entry back into the cache */
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	printk("unable to find dvma hole!\n");
	BUG();
	return 0;
}
198 | ||
/*
 * Return the allocation starting at 'baddr' to the hole list,
 * coalescing with an adjacent hole when possible.  The allocation
 * length is recovered from (and cleared in) the iommu_use[] table.
 * Always returns 0.
 */
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;	/* kept but currently unused */

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	/* first try to extend an existing hole that touches this range */
	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if(hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	/* no neighbour found: take a spare entry and add a new hole.
	 * After the loop cur == &hole_list, so list_add() inserts the
	 * new hole at the head of the list. */
	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	list_add(&(hole->list), cur);

	return 0;

}
245 | ||
246 | void dvma_init(void) | |
247 | { | |
248 | ||
249 | struct hole *hole; | |
250 | int i; | |
251 | ||
252 | INIT_LIST_HEAD(&hole_list); | |
253 | INIT_LIST_HEAD(&hole_cache); | |
254 | ||
255 | /* prepare the hole cache */ | |
256 | for(i = 0; i < 64; i++) | |
257 | list_add(&(initholes[i].list), &hole_cache); | |
258 | ||
259 | hole = rmcache(); | |
260 | hole->start = DVMA_START; | |
261 | hole->end = DVMA_END; | |
262 | hole->size = DVMA_SIZE; | |
263 | ||
264 | list_add(&(hole->list), &hole_list); | |
265 | ||
266 | memset(iommu_use, 0, sizeof(iommu_use)); | |
267 | ||
268 | dvma_unmap_iommu(DVMA_START, DVMA_SIZE); | |
269 | ||
270 | #ifdef CONFIG_SUN3 | |
271 | sun3_dvma_init(); | |
272 | #endif | |
273 | ||
274 | } | |
275 | ||
276 | inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align) | |
277 | { | |
278 | ||
279 | unsigned long baddr; | |
280 | unsigned long off; | |
281 | ||
282 | if(!len) | |
283 | len = 0x800; | |
284 | ||
285 | if(!kaddr || !len) { | |
286 | // printk("error: kaddr %lx len %x\n", kaddr, len); | |
287 | // *(int *)4 = 0; | |
288 | return 0; | |
289 | } | |
290 | ||
291 | #ifdef DEBUG | |
292 | printk("dvma_map request %08lx bytes from %08lx\n", | |
293 | len, kaddr); | |
294 | #endif | |
295 | off = kaddr & ~DVMA_PAGE_MASK; | |
296 | kaddr &= PAGE_MASK; | |
297 | len += off; | |
298 | len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); | |
299 | ||
300 | if(align == 0) | |
301 | align = DVMA_PAGE_SIZE; | |
302 | else | |
303 | align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); | |
304 | ||
305 | baddr = get_baddr(len, align); | |
306 | // printk("using baddr %lx\n", baddr); | |
307 | ||
308 | if(!dvma_map_iommu(kaddr, baddr, len)) | |
309 | return (baddr + off); | |
310 | ||
311 | printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len); | |
312 | BUG(); | |
313 | return 0; | |
314 | } | |
315 | ||
/*
 * Release a DVMA mapping previously created by dvma_map_align().
 */
void dvma_unmap(void *baddr)
{
	unsigned long addr = (unsigned long)baddr;

	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
330 | ||
331 | ||
332 | void *dvma_malloc_align(unsigned long len, unsigned long align) | |
333 | { | |
334 | unsigned long kaddr; | |
335 | unsigned long baddr; | |
336 | unsigned long vaddr; | |
337 | ||
338 | if(!len) | |
339 | return NULL; | |
340 | ||
341 | #ifdef DEBUG | |
342 | printk("dvma_malloc request %lx bytes\n", len); | |
343 | #endif | |
344 | len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); | |
345 | ||
346 | if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0) | |
347 | return NULL; | |
348 | ||
349 | if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) { | |
350 | free_pages(kaddr, get_order(len)); | |
351 | return NULL; | |
352 | } | |
353 | ||
354 | vaddr = dvma_btov(baddr); | |
355 | ||
356 | if(dvma_map_cpu(kaddr, vaddr, len) < 0) { | |
357 | dvma_unmap((void *)baddr); | |
358 | free_pages(kaddr, get_order(len)); | |
359 | return NULL; | |
360 | } | |
361 | ||
362 | #ifdef DEBUG | |
363 | printk("mapped %08lx bytes %08lx kern -> %08lx bus\n", | |
364 | len, kaddr, baddr); | |
365 | #endif | |
366 | ||
367 | return (void *)vaddr; | |
368 | ||
369 | } | |
370 | ||
/*
 * Intentionally a no-op: DVMA allocations are never returned.  The
 * bus mapping and backing pages stay in place for the lifetime of the
 * system.  NOTE(review): callers leak if they expect a real free --
 * presumably acceptable for the few long-lived users; confirm.
 */
void dvma_free(void *vaddr)
{

	return;

}