// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page arguments of these two macros are Emu pages (EMUPAGESIZE, i.e.
 * 4096 bytes), not the aligned kernel pages used elsewhere in this file.
 */
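/* Each 32-bit PTB entry holds the bus address of one EMUPAGESIZE page,
 * shifted left by emu->address_mode, with the index of the entry itself
 * in the low bits.
 */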
#define __set_ptb_entry(emu,page,addr) \
        (((__le32 *)(emu)->ptb_pages.area)[page] = \
         cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
        (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0        (MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1        (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

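/* When the kernel page size equals the Emu page size, a single PTB entry per
 * page suffices and the raw macros above can be used directly (unless dynamic
 * debug is enabled, presumably so that the dev_dbg() tracing in the helpers
 * below stays available).  Otherwise each kernel page covers UNIT_PAGES
 * entries and the helper functions fill them all.
 */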
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
                        (unsigned int)__get_ptb_entry(emu, page));
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                /* do not increment ptr */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
                dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
                        page, (unsigned int)__get_ptb_entry(emu, page));
        }
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped block
 * in nextp
 * if not found, return a negative error code.
 */
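/* Note: PTB page 0 is reserved (see the check in map_memblk()), so the
 * search starts at page 1.
 */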
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 1, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        if (page == 0) {
                dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
                return -EINVAL;
        }
        /* insert this block in the proper position of the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region, in pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 1;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
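/* search_empty() assumes the memhdr block list is ordered by offset; it does
 * a first-fit scan of the gaps between neighbouring blocks and falls back to
 * placing the block after the last one, provided it still fits within
 * max_cache_pages.
 */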
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                dev_err_ratelimited(emu->card->dev,
                        "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                        emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, tries to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks */
                /* starting from the oldest block */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
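/* Allocate a PTB-backed block for a PCM substream: the pages of the
 * substream's DMA (SG) buffer are entered into the page table, and any page
 * of the block beyond runtime->dma_bytes points at the silent page.  The
 * block is map_locked, so it is never evicted by snd_emu10k1_memblk_map().
 */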
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses, but the pointers are not stored so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (! is_valid_page(emu, addr)) {
                        dev_err_ratelimited(emu->card->dev,
                                "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
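/* This is just a thin wrapper: DMA-buffer blocks and synth blocks are torn
 * down the same way, so the work is delegated to snd_emu10k1_synth_free().
 */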
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for why
 * this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
                                        struct snd_dma_buffer *dmab)
{
        if (emu->iommu_workaround) {
                size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                size_t size_real = npages * PAGE_SIZE;

                /*
                 * The device has been observed to access up to 256 extra
                 * bytes, but use 1k to be safe.
                 */
                if (size_real < size + 1024)
                        size += PAGE_SIZE;
        }

        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                   &emu->pci->dev, size, dmab);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
447 | |
448 | /* check new allocation range */ | |
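/* Because synth blocks need not start or end on a kernel page boundary, the
 * first and/or last page of a new block may already be backed by a
 * neighbouring block's allocation; such pages are excluded from the range
 * returned here.
 */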
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;
        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        struct snd_dma_buffer dmab;
        int page;

        dmab.dev.type = SNDRV_DMA_TYPE_DEV;
        dmab.dev.dev = &emu->pci->dev;

        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];

                /*
                 * please keep me in sync with logic in
                 * snd_emu10k1_alloc_pages_maybe_wider()
                 */
                dmab.bytes = PAGE_SIZE;
                if (emu->iommu_workaround)
                        dmab.bytes *= 2;

                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
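/* One PAGE_SIZE DMA buffer is allocated per aligned page of the block,
 * skipping boundary pages already owned by a neighbouring block (see
 * get_single_page_range()); on failure, everything allocated so far is
 * released again.
 */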
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
                                                        &dmab) < 0)
                        goto __fail;
                if (!is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release allocated pages */
        last_page = page - 1;
        __synth_free_pages(emu, first_page, last_page);

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;
        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                dev_err(emu->card->dev,
                        "access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
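/* The synth pages are allocated individually and are therefore not virtually
 * contiguous, so the range is processed one aligned page at a time, looking
 * up each page's kernel pointer via offset_ptr().
 */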
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);