/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@suse.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* semaphore in struct file record */
#define semaphore_of(fp)	((fp)->f_dentry->d_inode->i_sem)


static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length events:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats:
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
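
/*
 * Illustrative sketch (an editor's example, compiled out; not part of
 * the original driver): classifying the storage format of a variable
 * length event from the flag bits described above.  The enum and the
 * function name are hypothetical.
 */
#if 0
enum ext_storage { EXT_KERNEL, EXT_USER, EXT_CHAINED };

static enum ext_storage classify_ext(const struct snd_seq_event *ev)
{
	if (ev->data.ext.len & SNDRV_SEQ_EXT_USRPTR)
		return EXT_USER;	/* data still sits in user space */
	if (ev->data.ext.len & SNDRV_SEQ_EXT_CHAINED)
		return EXT_CHAINED;	/* data decomposed into cells */
	return EXT_KERNEL;		/* plain kernel buffer */
}
#endif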

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
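
/*
 * Illustrative sketch (an editor's example, compiled out): a minimal
 * snd_seq_dump_func_t callback.  snd_seq_dump_var_event() feeds the
 * external data to the callback chunk by chunk, independent of which
 * of the three storage formats is used, so accumulating the chunk
 * sizes recovers the total length.  The helper names are hypothetical.
 */
#if 0
static int count_chunk(void *private_data, void *buf, int count)
{
	*(int *)private_data += count;	/* accumulate the chunk size */
	return 0;			/* a negative value would abort the walk */
}

static int var_event_length(const struct snd_seq_event *ev)
{
	int total = 0;
	int err = snd_seq_dump_var_event(ev, count_chunk, &total);
	return err < 0 ? err : total;
}
#endif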


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = ((len + size_aligned - 1) / size_aligned) * size_aligned;
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
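
/*
 * Illustrative sketch (an editor's example, compiled out): expanding a
 * variable length event into a flat kernel buffer.  With in_kernel = 1
 * and size_aligned = 0 the call returns -EAGAIN when the buffer is too
 * small, or the number of bytes copied on success.  The wrapper name
 * is hypothetical.
 */
#if 0
static int flatten_var_event(const struct snd_seq_event *ev,
			     char *buf, int bufsize)
{
	return snd_seq_expand_var_event(ev, bufsize, buf, 1, 0);
}
#endif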


/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	snd_assert(cell != NULL, return);
	pool = cell->pool;
	snd_assert(pool != NULL, return);

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed into additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
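
/*
 * Illustrative sketch (an editor's example, compiled out): the typical
 * duplicate-then-free cycle around snd_seq_event_dup().  The returned
 * cell (with any chained data cells) would normally be handed to a
 * prioq or fifo and released via snd_seq_cell_free() after delivery.
 * The function name is hypothetical.
 */
#if 0
static int deliver_copy(struct snd_seq_pool *pool,
			struct snd_seq_event *ev, int nonblock)
{
	struct snd_seq_event_cell *cell;
	int err;

	err = snd_seq_event_dup(pool, ev, &cell, nonblock, NULL);
	if (err < 0)
		return err;	/* -EAGAIN, -ENOMEM, -EFAULT, ... */
	/* ... enqueue the cell here ... */
	snd_seq_cell_free(cell);
	return 0;
}
#endif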

/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	snd_assert(pool != NULL, return -EINVAL);
	if (pool->ptr)		/* should be atomic? */
		return 0;

	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (pool->ptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	snd_assert(pool != NULL, return -EINVAL);

	/* mark the pool closing and wait until all cells are freed */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}
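
/*
 * Illustrative sketch (an editor's example, compiled out): the pool
 * life cycle as a caller would drive it.  snd_seq_pool_new() only
 * allocates the control block; snd_seq_pool_init() vmallocs the cell
 * array; snd_seq_pool_delete() drains and frees everything.  The pool
 * size of 500 cells is an arbitrary example value.
 */
#if 0
static int pool_lifecycle_example(void)
{
	struct snd_seq_pool *pool;
	int err;

	pool = snd_seq_pool_new(500);
	if (pool == NULL)
		return -ENOMEM;
	err = snd_seq_pool_init(pool);
	if (err < 0) {
		snd_seq_pool_delete(&pool);
		return err;
	}
	/* ... allocate cells with snd_seq_event_dup() etc. ... */
	snd_seq_pool_delete(&pool);	/* implies snd_seq_pool_done() */
	return 0;
}
#endif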

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}