1 /*
2 * Digital Audio (PCM) abstract layer
3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/file.h>
25 #include <linux/slab.h>
26 #include <linux/sched/signal.h>
27 #include <linux/time.h>
28 #include <linux/pm_qos.h>
29 #include <linux/io.h>
30 #include <linux/dma-mapping.h>
31 #include <sound/core.h>
32 #include <sound/control.h>
33 #include <sound/info.h>
34 #include <sound/pcm.h>
35 #include <sound/pcm_params.h>
36 #include <sound/timer.h>
37 #include <sound/minors.h>
38 #include <linux/uio.h>
39
40 #include "pcm_local.h"
41
42 #ifdef CONFIG_SND_DEBUG
43 #define CREATE_TRACE_POINTS
44 #include "pcm_param_trace.h"
45 #else
46 #define trace_hw_mask_param_enabled() 0
47 #define trace_hw_interval_param_enabled() 0
48 #define trace_hw_mask_param(substream, type, index, prev, curr)
49 #define trace_hw_interval_param(substream, type, index, prev, curr)
50 #endif
51
52 /*
53 * Compatibility
54 */
55
56 struct snd_pcm_hw_params_old {
57 unsigned int flags;
58 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
59 SNDRV_PCM_HW_PARAM_ACCESS + 1];
60 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
61 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
62 unsigned int rmask;
63 unsigned int cmask;
64 unsigned int info;
65 unsigned int msbits;
66 unsigned int rate_num;
67 unsigned int rate_den;
68 snd_pcm_uframes_t fifo_size;
69 unsigned char reserved[64];
70 };
71
72 #ifdef CONFIG_SND_SUPPORT_OLD_API
73 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
74 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
75
76 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
77 struct snd_pcm_hw_params_old __user * _oparams);
78 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
79 struct snd_pcm_hw_params_old __user * _oparams);
80 #endif
81 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
82
83 /*
84 *
85 */
86
87 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
88 static DECLARE_RWSEM(snd_pcm_link_rwsem);
89
90 /* A writer in the rwsem may block readers even while it is waiting in the
91  * queue, and this may lead to a deadlock when a code path takes the read
92  * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
93  * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
94  * spin until it gets the lock.
95  */
96 static inline void down_write_nonblock(struct rw_semaphore *lock)
97 {
98 while (!down_write_trylock(lock))
99 cond_resched();
100 }
101
102 /**
103 * snd_pcm_stream_lock - Lock the PCM stream
104 * @substream: PCM substream
105 *
106 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
107 * flag of the given substream. It also takes the global link rwlock
108 * (or rwsem) to avoid a race with linked streams.
109 */
110 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
111 {
112 if (substream->pcm->nonatomic) {
113 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
114 mutex_lock(&substream->self_group.mutex);
115 } else {
116 read_lock(&snd_pcm_link_rwlock);
117 spin_lock(&substream->self_group.lock);
118 }
119 }
120 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
121
122 /**
123 * snd_pcm_stream_unlock - Unlock the PCM stream
124 * @substream: PCM substream
125 *
126 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
127 */
128 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
129 {
130 if (substream->pcm->nonatomic) {
131 mutex_unlock(&substream->self_group.mutex);
132 up_read(&snd_pcm_link_rwsem);
133 } else {
134 spin_unlock(&substream->self_group.lock);
135 read_unlock(&snd_pcm_link_rwlock);
136 }
137 }
138 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
139
140 /**
141 * snd_pcm_stream_lock_irq - Lock the PCM stream
142 * @substream: PCM substream
143 *
144 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
145 * IRQ (only when nonatomic is false). In the nonatomic case, this is identical
146 * to snd_pcm_stream_lock().
147 */
148 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
149 {
150 if (!substream->pcm->nonatomic)
151 local_irq_disable();
152 snd_pcm_stream_lock(substream);
153 }
154 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
155
156 /**
157 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
158 * @substream: PCM substream
159 *
160 * This is the counterpart of snd_pcm_stream_lock_irq().
161 */
162 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
163 {
164 snd_pcm_stream_unlock(substream);
165 if (!substream->pcm->nonatomic)
166 local_irq_enable();
167 }
168 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
169
170 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
171 {
172 unsigned long flags = 0;
173 if (!substream->pcm->nonatomic)
174 local_irq_save(flags);
175 snd_pcm_stream_lock(substream);
176 return flags;
177 }
178 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
179
180 /**
181 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
182 * @substream: PCM substream
183 * @flags: irq flags
184 *
185 * This is the counterpart of snd_pcm_stream_lock_irqsave().
186 */
187 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
188 unsigned long flags)
189 {
190 snd_pcm_stream_unlock(substream);
191 if (!substream->pcm->nonatomic)
192 local_irq_restore(flags);
193 }
194 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
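
/*
 * Illustrative sketch (assumption, not part of this file): a driver helper
 * that must touch the runtime state from a context where IRQs may or may not
 * already be disabled would typically pair the irqsave/irqrestore helpers as
 * below. The function name my_driver_poke_pointer() is hypothetical.
 *
 *	static void my_driver_poke_pointer(struct snd_pcm_substream *substream)
 *	{
 *		unsigned long flags;
 *
 *		snd_pcm_stream_lock_irqsave(substream, flags);
 *		... update substream->runtime fields safely here ...
 *		snd_pcm_stream_unlock_irqrestore(substream, flags);
 *	}
 */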
195
196 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
197 {
198 struct snd_pcm_runtime *runtime;
199 struct snd_pcm *pcm = substream->pcm;
200 struct snd_pcm_str *pstr = substream->pstr;
201
202 memset(info, 0, sizeof(*info));
203 info->card = pcm->card->number;
204 info->device = pcm->device;
205 info->stream = substream->stream;
206 info->subdevice = substream->number;
207 strlcpy(info->id, pcm->id, sizeof(info->id));
208 strlcpy(info->name, pcm->name, sizeof(info->name));
209 info->dev_class = pcm->dev_class;
210 info->dev_subclass = pcm->dev_subclass;
211 info->subdevices_count = pstr->substream_count;
212 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
213 strlcpy(info->subname, substream->name, sizeof(info->subname));
214 runtime = substream->runtime;
215
216 return 0;
217 }
218
219 int snd_pcm_info_user(struct snd_pcm_substream *substream,
220 struct snd_pcm_info __user * _info)
221 {
222 struct snd_pcm_info *info;
223 int err;
224
225 info = kmalloc(sizeof(*info), GFP_KERNEL);
226 if (! info)
227 return -ENOMEM;
228 err = snd_pcm_info(substream, info);
229 if (err >= 0) {
230 if (copy_to_user(_info, info, sizeof(*info)))
231 err = -EFAULT;
232 }
233 kfree(info);
234 return err;
235 }
236
237 static bool hw_support_mmap(struct snd_pcm_substream *substream)
238 {
239 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
240 return false;
241 /* check architectures that return -EINVAL from dma_mmap_coherent() */
242 /* FIXME: this should be some global flag */
243 #if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
244 defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
245 if (!substream->ops->mmap &&
246 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
247 return false;
248 #endif
249 return true;
250 }
251
252 static int constrain_mask_params(struct snd_pcm_substream *substream,
253 struct snd_pcm_hw_params *params)
254 {
255 struct snd_pcm_hw_constraints *constrs =
256 &substream->runtime->hw_constraints;
257 struct snd_mask *m;
258 unsigned int k;
259 struct snd_mask old_mask;
260 int changed;
261
262 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
263 m = hw_param_mask(params, k);
264 if (snd_mask_empty(m))
265 return -EINVAL;
266
267 /* The caller did not request a change of this parameter; skip it. */
268 if (!(params->rmask & (1 << k)))
269 continue;
270
271 if (trace_hw_mask_param_enabled())
272 old_mask = *m;
273
274 changed = snd_mask_refine(m, constrs_mask(constrs, k));
275 if (changed < 0)
276 return changed;
277 if (changed == 0)
278 continue;
279
280 /* Set the corresponding flag so that the caller sees the change. */
281 trace_hw_mask_param(substream, k, 0, &old_mask, m);
282 params->cmask |= 1 << k;
283 }
284
285 return 0;
286 }
287
288 static int constrain_interval_params(struct snd_pcm_substream *substream,
289 struct snd_pcm_hw_params *params)
290 {
291 struct snd_pcm_hw_constraints *constrs =
292 &substream->runtime->hw_constraints;
293 struct snd_interval *i;
294 unsigned int k;
295 struct snd_interval old_interval;
296 int changed;
297
298 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
299 i = hw_param_interval(params, k);
300 if (snd_interval_empty(i))
301 return -EINVAL;
302
303 /* The caller did not request a change of this parameter; skip it. */
304 if (!(params->rmask & (1 << k)))
305 continue;
306
307 if (trace_hw_interval_param_enabled())
308 old_interval = *i;
309
310 changed = snd_interval_refine(i, constrs_interval(constrs, k));
311 if (changed < 0)
312 return changed;
313 if (changed == 0)
314 continue;
315
316 /* Set the corresponding flag so that the caller sees the change. */
317 trace_hw_interval_param(substream, k, 0, &old_interval, i);
318 params->cmask |= 1 << k;
319 }
320
321 return 0;
322 }
323
324 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
325 struct snd_pcm_hw_params *params)
326 {
327 struct snd_pcm_hw_constraints *constrs =
328 &substream->runtime->hw_constraints;
329 unsigned int k;
330 unsigned int rstamps[constrs->rules_num];
331 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
332 unsigned int stamp;
333 struct snd_pcm_hw_rule *r;
334 unsigned int d;
335 struct snd_mask old_mask;
336 struct snd_interval old_interval;
337 bool again;
338 int changed;
339
340 /*
341 * Each application of a rule has its own sequence number.
342 *
343 * Each member of the 'rstamps' array holds the sequence number of the
344 * most recent application of the corresponding rule.
345 */
346 for (k = 0; k < constrs->rules_num; k++)
347 rstamps[k] = 0;
348
349 /*
350 * Each member of the 'vstamps' array holds the sequence number of the
351 * most recent rule application in which the corresponding parameter
352 * was changed.
353 *
354 * Initially, the elements corresponding to parameters requested by the
355 * caller are 1. For unrequested parameters, the members are 0 so that
356 * those parameters are never changed.
357 */
358 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
359 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
360
361 /* Due to the above design, actual sequence number starts at 2. */
362 stamp = 2;
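/*
 * Hypothetical illustration of the stamp bookkeeping: if a rule depends on
 * RATE and the caller requested RATE, vstamps[RATE] (== 1) exceeds the
 * rule's rstamp (== 0), so the rule is applied and its rstamp is set to the
 * current stamp. If the rule also narrows PERIOD_SIZE, vstamps of
 * PERIOD_SIZE gets the same stamp, so any rule depending on PERIOD_SIZE
 * whose rstamp is still older will be applied (again) in a later iteration.
 */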
363 retry:
364 /* Apply all rules in order. */
365 again = false;
366 for (k = 0; k < constrs->rules_num; k++) {
367 r = &constrs->rules[k];
368
369 /*
370 * Check the condition bits of this rule. When the rule has
371 * condition bits, it is skipped unless the request carries
372 * those bits. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP is an
373 * example of such a condition bit.
374 */
375 if (r->cond && !(r->cond & params->flags))
376 continue;
377
378 /*
379 * The 'deps' array includes at most three dependencies
380 * on SNDRV_PCM_HW_PARAM_XXX parameters for this rule. The
381 * fourth member of the array is a sentinel and must be a
382 * negative value.
383 *
384 * The rule is processed in this pass only when a dependent
385 * parameter was changed by an earlier application of another
386 * rule.
387 */
388 for (d = 0; r->deps[d] >= 0; d++) {
389 if (vstamps[r->deps[d]] > rstamps[k])
390 break;
391 }
392 if (r->deps[d] < 0)
393 continue;
394
395 if (trace_hw_mask_param_enabled()) {
396 if (hw_is_mask(r->var))
397 old_mask = *hw_param_mask(params, r->var);
398 }
399 if (trace_hw_interval_param_enabled()) {
400 if (hw_is_interval(r->var))
401 old_interval = *hw_param_interval(params, r->var);
402 }
403
404 changed = r->func(params, r);
405 if (changed < 0)
406 return changed;
407
408 /*
409 * When the parameter was changed, notify the caller via the
410 * corresponding bit in cmask, then prepare for the next
411 * iteration.
412 */
413 if (changed && r->var >= 0) {
414 if (hw_is_mask(r->var)) {
415 trace_hw_mask_param(substream, r->var,
416 k + 1, &old_mask,
417 hw_param_mask(params, r->var));
418 }
419 if (hw_is_interval(r->var)) {
420 trace_hw_interval_param(substream, r->var,
421 k + 1, &old_interval,
422 hw_param_interval(params, r->var));
423 }
424
425 params->cmask |= (1 << r->var);
426 vstamps[r->var] = stamp;
427 again = true;
428 }
429
430 rstamps[k] = stamp++;
431 }
432
433 /* Iterate over all rules until no parameters are changed. */
434 if (again)
435 goto retry;
436
437 return 0;
438 }
439
440 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
441 struct snd_pcm_hw_params *params)
442 {
443 const struct snd_interval *i;
444 const struct snd_mask *m;
445 int err;
446
447 if (!params->msbits) {
448 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
449 if (snd_interval_single(i))
450 params->msbits = snd_interval_value(i);
451 }
452
453 if (!params->rate_den) {
454 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
455 if (snd_interval_single(i)) {
456 params->rate_num = snd_interval_value(i);
457 params->rate_den = 1;
458 }
459 }
460
461 if (!params->fifo_size) {
462 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
463 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
464 if (snd_mask_single(m) && snd_interval_single(i)) {
465 err = substream->ops->ioctl(substream,
466 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
467 if (err < 0)
468 return err;
469 }
470 }
471
472 if (!params->info) {
473 params->info = substream->runtime->hw.info;
474 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
475 SNDRV_PCM_INFO_DRAIN_TRIGGER);
476 if (!hw_support_mmap(substream))
477 params->info &= ~(SNDRV_PCM_INFO_MMAP |
478 SNDRV_PCM_INFO_MMAP_VALID);
479 }
480
481 return 0;
482 }
483
484 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
485 struct snd_pcm_hw_params *params)
486 {
487 int err;
488
489 params->info = 0;
490 params->fifo_size = 0;
491 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
492 params->msbits = 0;
493 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
494 params->rate_num = 0;
495 params->rate_den = 0;
496 }
497
498 err = constrain_mask_params(substream, params);
499 if (err < 0)
500 return err;
501
502 err = constrain_interval_params(substream, params);
503 if (err < 0)
504 return err;
505
506 err = constrain_params_by_rules(substream, params);
507 if (err < 0)
508 return err;
509
510 params->rmask = 0;
511
512 return 0;
513 }
514 EXPORT_SYMBOL(snd_pcm_hw_refine);
515
516 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
517 struct snd_pcm_hw_params __user * _params)
518 {
519 struct snd_pcm_hw_params *params;
520 int err;
521
522 params = memdup_user(_params, sizeof(*params));
523 if (IS_ERR(params))
524 return PTR_ERR(params);
525
526 err = snd_pcm_hw_refine(substream, params);
527 if (err < 0)
528 goto end;
529
530 err = fixup_unreferenced_params(substream, params);
531 if (err < 0)
532 goto end;
533
534 if (copy_to_user(_params, params, sizeof(*params)))
535 err = -EFAULT;
536 end:
537 kfree(params);
538 return err;
539 }
540
541 static int period_to_usecs(struct snd_pcm_runtime *runtime)
542 {
543 int usecs;
544
545 if (! runtime->rate)
546 return -1; /* invalid */
547
548 /* take 75% of period time as the deadline */
549 usecs = (750000 / runtime->rate) * runtime->period_size;
550 usecs += ((750000 % runtime->rate) * runtime->period_size) /
551 runtime->rate;
552
553 return usecs;
554 }
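
/*
 * Worked example (illustrative only): with a rate of 48000 Hz and a period
 * of 1024 frames, the period time is about 21333 us, and period_to_usecs()
 * returns 75% of it: (750000 / 48000) * 1024 + ((750000 % 48000) * 1024)
 * / 48000 = 15360 + 640 = 16000 us, which is later used as the PM QoS
 * CPU/DMA latency request in snd_pcm_hw_params().
 */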
555
556 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
557 {
558 snd_pcm_stream_lock_irq(substream);
559 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
560 substream->runtime->status->state = state;
561 snd_pcm_stream_unlock_irq(substream);
562 }
563
564 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
565 int event)
566 {
567 #ifdef CONFIG_SND_PCM_TIMER
568 if (substream->timer)
569 snd_timer_notify(substream->timer, event,
570 &substream->runtime->trigger_tstamp);
571 #endif
572 }
573
574 /**
575 * snd_pcm_hw_params_choose - choose a configuration defined by @params
576 * @pcm: PCM instance
577 * @params: the hw_params instance
578 *
579 * Choose one configuration from the configuration space defined by @params.
580 * The chosen configuration is obtained by fixing, in this order:
581 * first access, first format, first subformat, min channels,
582 * min rate, min period time, max buffer size, min tick time.
583 *
584 * Return: Zero if successful, or a negative error code on failure.
585 */
586 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
587 struct snd_pcm_hw_params *params)
588 {
589 static const int vars[] = {
590 SNDRV_PCM_HW_PARAM_ACCESS,
591 SNDRV_PCM_HW_PARAM_FORMAT,
592 SNDRV_PCM_HW_PARAM_SUBFORMAT,
593 SNDRV_PCM_HW_PARAM_CHANNELS,
594 SNDRV_PCM_HW_PARAM_RATE,
595 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
596 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
597 SNDRV_PCM_HW_PARAM_TICK_TIME,
598 -1
599 };
600 const int *v;
601 struct snd_mask old_mask;
602 struct snd_interval old_interval;
603 int changed;
604
605 for (v = vars; *v != -1; v++) {
606 /* Keep the old parameter for tracing. */
607 if (trace_hw_mask_param_enabled()) {
608 if (hw_is_mask(*v))
609 old_mask = *hw_param_mask(params, *v);
610 }
611 if (trace_hw_interval_param_enabled()) {
612 if (hw_is_interval(*v))
613 old_interval = *hw_param_interval(params, *v);
614 }
615 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
616 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
617 else
618 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
619 if (snd_BUG_ON(changed < 0))
620 return changed;
621 if (changed == 0)
622 continue;
623
624 /* Trace the changed parameter. */
625 if (hw_is_mask(*v)) {
626 trace_hw_mask_param(pcm, *v, 0, &old_mask,
627 hw_param_mask(params, *v));
628 }
629 if (hw_is_interval(*v)) {
630 trace_hw_interval_param(pcm, *v, 0, &old_interval,
631 hw_param_interval(params, *v));
632 }
633 }
634
635 return 0;
636 }
637
638 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
639 struct snd_pcm_hw_params *params)
640 {
641 struct snd_pcm_runtime *runtime;
642 int err, usecs;
643 unsigned int bits;
644 snd_pcm_uframes_t frames;
645
646 if (PCM_RUNTIME_CHECK(substream))
647 return -ENXIO;
648 runtime = substream->runtime;
649 snd_pcm_stream_lock_irq(substream);
650 switch (runtime->status->state) {
651 case SNDRV_PCM_STATE_OPEN:
652 case SNDRV_PCM_STATE_SETUP:
653 case SNDRV_PCM_STATE_PREPARED:
654 break;
655 default:
656 snd_pcm_stream_unlock_irq(substream);
657 return -EBADFD;
658 }
659 snd_pcm_stream_unlock_irq(substream);
660 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
661 if (!substream->oss.oss)
662 #endif
663 if (atomic_read(&substream->mmap_count))
664 return -EBADFD;
665
666 params->rmask = ~0U;
667 err = snd_pcm_hw_refine(substream, params);
668 if (err < 0)
669 goto _error;
670
671 err = snd_pcm_hw_params_choose(substream, params);
672 if (err < 0)
673 goto _error;
674
675 err = fixup_unreferenced_params(substream, params);
676 if (err < 0)
677 goto _error;
678
679 if (substream->ops->hw_params != NULL) {
680 err = substream->ops->hw_params(substream, params);
681 if (err < 0)
682 goto _error;
683 }
684
685 runtime->access = params_access(params);
686 runtime->format = params_format(params);
687 runtime->subformat = params_subformat(params);
688 runtime->channels = params_channels(params);
689 runtime->rate = params_rate(params);
690 runtime->period_size = params_period_size(params);
691 runtime->periods = params_periods(params);
692 runtime->buffer_size = params_buffer_size(params);
693 runtime->info = params->info;
694 runtime->rate_num = params->rate_num;
695 runtime->rate_den = params->rate_den;
696 runtime->no_period_wakeup =
697 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
698 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
699
700 bits = snd_pcm_format_physical_width(runtime->format);
701 runtime->sample_bits = bits;
702 bits *= runtime->channels;
703 runtime->frame_bits = bits;
704 frames = 1;
705 while (bits % 8 != 0) {
706 bits *= 2;
707 frames *= 2;
708 }
709 runtime->byte_align = bits / 8;
710 runtime->min_align = frames;
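/*
 * Illustrative example: for S16_LE stereo, the physical width is 16 bits,
 * so frame_bits = 32, byte_align = 4 and min_align = 1 frame. For a 4-bit
 * format such as IMA ADPCM in mono, bits starts at 4 and is doubled until
 * it is byte-aligned, giving byte_align = 1 and min_align = 2 frames.
 */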
711
712 /* Default sw params */
713 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
714 runtime->period_step = 1;
715 runtime->control->avail_min = runtime->period_size;
716 runtime->start_threshold = 1;
717 runtime->stop_threshold = runtime->buffer_size;
718 runtime->silence_threshold = 0;
719 runtime->silence_size = 0;
720 runtime->boundary = runtime->buffer_size;
721 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
722 runtime->boundary *= 2;
723
724 snd_pcm_timer_resolution_change(substream);
725 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
726
727 if (pm_qos_request_active(&substream->latency_pm_qos_req))
728 pm_qos_remove_request(&substream->latency_pm_qos_req);
729 if ((usecs = period_to_usecs(runtime)) >= 0)
730 pm_qos_add_request(&substream->latency_pm_qos_req,
731 PM_QOS_CPU_DMA_LATENCY, usecs);
732 return 0;
733 _error:
734 /* the hardware might be unusable from this point on,
735 so we force the application to retry setting
736 the correct hardware parameters */
737 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
738 if (substream->ops->hw_free != NULL)
739 substream->ops->hw_free(substream);
740 return err;
741 }
742
743 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
744 struct snd_pcm_hw_params __user * _params)
745 {
746 struct snd_pcm_hw_params *params;
747 int err;
748
749 params = memdup_user(_params, sizeof(*params));
750 if (IS_ERR(params))
751 return PTR_ERR(params);
752
753 err = snd_pcm_hw_params(substream, params);
754 if (err < 0)
755 goto end;
756
757 if (copy_to_user(_params, params, sizeof(*params)))
758 err = -EFAULT;
759 end:
760 kfree(params);
761 return err;
762 }
763
764 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
765 {
766 struct snd_pcm_runtime *runtime;
767 int result = 0;
768
769 if (PCM_RUNTIME_CHECK(substream))
770 return -ENXIO;
771 runtime = substream->runtime;
772 snd_pcm_stream_lock_irq(substream);
773 switch (runtime->status->state) {
774 case SNDRV_PCM_STATE_SETUP:
775 case SNDRV_PCM_STATE_PREPARED:
776 break;
777 default:
778 snd_pcm_stream_unlock_irq(substream);
779 return -EBADFD;
780 }
781 snd_pcm_stream_unlock_irq(substream);
782 if (atomic_read(&substream->mmap_count))
783 return -EBADFD;
784 if (substream->ops->hw_free)
785 result = substream->ops->hw_free(substream);
786 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
787 pm_qos_remove_request(&substream->latency_pm_qos_req);
788 return result;
789 }
790
791 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
792 struct snd_pcm_sw_params *params)
793 {
794 struct snd_pcm_runtime *runtime;
795 int err;
796
797 if (PCM_RUNTIME_CHECK(substream))
798 return -ENXIO;
799 runtime = substream->runtime;
800 snd_pcm_stream_lock_irq(substream);
801 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
802 snd_pcm_stream_unlock_irq(substream);
803 return -EBADFD;
804 }
805 snd_pcm_stream_unlock_irq(substream);
806
807 if (params->tstamp_mode < 0 ||
808 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
809 return -EINVAL;
810 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
811 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
812 return -EINVAL;
813 if (params->avail_min == 0)
814 return -EINVAL;
815 if (params->silence_size >= runtime->boundary) {
816 if (params->silence_threshold != 0)
817 return -EINVAL;
818 } else {
819 if (params->silence_size > params->silence_threshold)
820 return -EINVAL;
821 if (params->silence_threshold > runtime->buffer_size)
822 return -EINVAL;
823 }
824 err = 0;
825 snd_pcm_stream_lock_irq(substream);
826 runtime->tstamp_mode = params->tstamp_mode;
827 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
828 runtime->tstamp_type = params->tstamp_type;
829 runtime->period_step = params->period_step;
830 runtime->control->avail_min = params->avail_min;
831 runtime->start_threshold = params->start_threshold;
832 runtime->stop_threshold = params->stop_threshold;
833 runtime->silence_threshold = params->silence_threshold;
834 runtime->silence_size = params->silence_size;
835 params->boundary = runtime->boundary;
836 if (snd_pcm_running(substream)) {
837 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
838 runtime->silence_size > 0)
839 snd_pcm_playback_silence(substream, ULONG_MAX);
840 err = snd_pcm_update_state(substream, runtime);
841 }
842 snd_pcm_stream_unlock_irq(substream);
843 return err;
844 }
845
846 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
847 struct snd_pcm_sw_params __user * _params)
848 {
849 struct snd_pcm_sw_params params;
850 int err;
851 if (copy_from_user(&params, _params, sizeof(params)))
852 return -EFAULT;
853 err = snd_pcm_sw_params(substream, &params);
854 if (copy_to_user(_params, &params, sizeof(params)))
855 return -EFAULT;
856 return err;
857 }
858
859 int snd_pcm_status(struct snd_pcm_substream *substream,
860 struct snd_pcm_status *status)
861 {
862 struct snd_pcm_runtime *runtime = substream->runtime;
863
864 snd_pcm_stream_lock_irq(substream);
865
866 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
867 &runtime->audio_tstamp_config);
868
869 /* backwards compatible behavior */
870 if (runtime->audio_tstamp_config.type_requested ==
871 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
872 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
873 runtime->audio_tstamp_config.type_requested =
874 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
875 else
876 runtime->audio_tstamp_config.type_requested =
877 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
878 runtime->audio_tstamp_report.valid = 0;
879 } else
880 runtime->audio_tstamp_report.valid = 1;
881
882 status->state = runtime->status->state;
883 status->suspended_state = runtime->status->suspended_state;
884 if (status->state == SNDRV_PCM_STATE_OPEN)
885 goto _end;
886 status->trigger_tstamp = runtime->trigger_tstamp;
887 if (snd_pcm_running(substream)) {
888 snd_pcm_update_hw_ptr(substream);
889 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
890 status->tstamp = runtime->status->tstamp;
891 status->driver_tstamp = runtime->driver_tstamp;
892 status->audio_tstamp =
893 runtime->status->audio_tstamp;
894 if (runtime->audio_tstamp_report.valid == 1)
895 /* backwards compatibility, no report provided in COMPAT mode */
896 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
897 &status->audio_tstamp_accuracy,
898 &runtime->audio_tstamp_report);
899
900 goto _tstamp_end;
901 }
902 } else {
903 /* get tstamp only in fallback mode and only if enabled */
904 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
905 snd_pcm_gettime(runtime, &status->tstamp);
906 }
907 _tstamp_end:
908 status->appl_ptr = runtime->control->appl_ptr;
909 status->hw_ptr = runtime->status->hw_ptr;
910 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
911 status->avail = snd_pcm_playback_avail(runtime);
912 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
913 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
914 status->delay = runtime->buffer_size - status->avail;
915 status->delay += runtime->delay;
916 } else
917 status->delay = 0;
918 } else {
919 status->avail = snd_pcm_capture_avail(runtime);
920 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
921 status->delay = status->avail + runtime->delay;
922 else
923 status->delay = 0;
924 }
925 status->avail_max = runtime->avail_max;
926 status->overrange = runtime->overrange;
927 runtime->avail_max = 0;
928 runtime->overrange = 0;
929 _end:
930 snd_pcm_stream_unlock_irq(substream);
931 return 0;
932 }
933
934 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
935 struct snd_pcm_status __user * _status,
936 bool ext)
937 {
938 struct snd_pcm_status status;
939 int res;
940
941 memset(&status, 0, sizeof(status));
942 /*
943 * with extension, parameters are read/write,
944 * get audio_tstamp_data from user,
945 * ignore rest of status structure
946 */
947 if (ext && get_user(status.audio_tstamp_data,
948 (u32 __user *)(&_status->audio_tstamp_data)))
949 return -EFAULT;
950 res = snd_pcm_status(substream, &status);
951 if (res < 0)
952 return res;
953 if (copy_to_user(_status, &status, sizeof(status)))
954 return -EFAULT;
955 return 0;
956 }
957
958 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
959 struct snd_pcm_channel_info * info)
960 {
961 struct snd_pcm_runtime *runtime;
962 unsigned int channel;
963
964 channel = info->channel;
965 runtime = substream->runtime;
966 snd_pcm_stream_lock_irq(substream);
967 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
968 snd_pcm_stream_unlock_irq(substream);
969 return -EBADFD;
970 }
971 snd_pcm_stream_unlock_irq(substream);
972 if (channel >= runtime->channels)
973 return -EINVAL;
974 memset(info, 0, sizeof(*info));
975 info->channel = channel;
976 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
977 }
978
979 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
980 struct snd_pcm_channel_info __user * _info)
981 {
982 struct snd_pcm_channel_info info;
983 int res;
984
985 if (copy_from_user(&info, _info, sizeof(info)))
986 return -EFAULT;
987 res = snd_pcm_channel_info(substream, &info);
988 if (res < 0)
989 return res;
990 if (copy_to_user(_info, &info, sizeof(info)))
991 return -EFAULT;
992 return 0;
993 }
994
995 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
996 {
997 struct snd_pcm_runtime *runtime = substream->runtime;
998 if (runtime->trigger_master == NULL)
999 return;
1000 if (runtime->trigger_master == substream) {
1001 if (!runtime->trigger_tstamp_latched)
1002 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1003 } else {
1004 snd_pcm_trigger_tstamp(runtime->trigger_master);
1005 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1006 }
1007 runtime->trigger_master = NULL;
1008 }
1009
1010 struct action_ops {
1011 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1012 int (*do_action)(struct snd_pcm_substream *substream, int state);
1013 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1014 void (*post_action)(struct snd_pcm_substream *substream, int state);
1015 };
1016
1017 /*
1018 * This function is the core for handling linked streams.
1019 * Note: the stream state might be changed even on failure.
1020 * Note2: call with the calling stream's lock and the link lock held.
1021 */
1022 static int snd_pcm_action_group(const struct action_ops *ops,
1023 struct snd_pcm_substream *substream,
1024 int state, int do_lock)
1025 {
1026 struct snd_pcm_substream *s = NULL;
1027 struct snd_pcm_substream *s1;
1028 int res = 0, depth = 1;
1029
1030 snd_pcm_group_for_each_entry(s, substream) {
1031 if (do_lock && s != substream) {
1032 if (s->pcm->nonatomic)
1033 mutex_lock_nested(&s->self_group.mutex, depth);
1034 else
1035 spin_lock_nested(&s->self_group.lock, depth);
1036 depth++;
1037 }
1038 res = ops->pre_action(s, state);
1039 if (res < 0)
1040 goto _unlock;
1041 }
1042 snd_pcm_group_for_each_entry(s, substream) {
1043 res = ops->do_action(s, state);
1044 if (res < 0) {
1045 if (ops->undo_action) {
1046 snd_pcm_group_for_each_entry(s1, substream) {
1047 if (s1 == s) /* failed stream */
1048 break;
1049 ops->undo_action(s1, state);
1050 }
1051 }
1052 s = NULL; /* unlock all */
1053 goto _unlock;
1054 }
1055 }
1056 snd_pcm_group_for_each_entry(s, substream) {
1057 ops->post_action(s, state);
1058 }
1059 _unlock:
1060 if (do_lock) {
1061 /* unlock streams */
1062 snd_pcm_group_for_each_entry(s1, substream) {
1063 if (s1 != substream) {
1064 if (s1->pcm->nonatomic)
1065 mutex_unlock(&s1->self_group.mutex);
1066 else
1067 spin_unlock(&s1->self_group.lock);
1068 }
1069 if (s1 == s) /* end */
1070 break;
1071 }
1072 }
1073 return res;
1074 }
1075
1076 /*
1077 * Note: call with stream lock
1078 */
1079 static int snd_pcm_action_single(const struct action_ops *ops,
1080 struct snd_pcm_substream *substream,
1081 int state)
1082 {
1083 int res;
1084
1085 res = ops->pre_action(substream, state);
1086 if (res < 0)
1087 return res;
1088 res = ops->do_action(substream, state);
1089 if (res == 0)
1090 ops->post_action(substream, state);
1091 else if (ops->undo_action)
1092 ops->undo_action(substream, state);
1093 return res;
1094 }
1095
1096 /*
1097 * Note: call with stream lock
1098 */
1099 static int snd_pcm_action(const struct action_ops *ops,
1100 struct snd_pcm_substream *substream,
1101 int state)
1102 {
1103 int res;
1104
1105 if (!snd_pcm_stream_linked(substream))
1106 return snd_pcm_action_single(ops, substream, state);
1107
1108 if (substream->pcm->nonatomic) {
1109 if (!mutex_trylock(&substream->group->mutex)) {
1110 mutex_unlock(&substream->self_group.mutex);
1111 mutex_lock(&substream->group->mutex);
1112 mutex_lock(&substream->self_group.mutex);
1113 }
1114 res = snd_pcm_action_group(ops, substream, state, 1);
1115 mutex_unlock(&substream->group->mutex);
1116 } else {
1117 if (!spin_trylock(&substream->group->lock)) {
1118 spin_unlock(&substream->self_group.lock);
1119 spin_lock(&substream->group->lock);
1120 spin_lock(&substream->self_group.lock);
1121 }
1122 res = snd_pcm_action_group(ops, substream, state, 1);
1123 spin_unlock(&substream->group->lock);
1124 }
1125 return res;
1126 }
1127
1128 /*
1129 * Note: call without holding any locks beforehand
1130 */
1131 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1132 struct snd_pcm_substream *substream,
1133 int state)
1134 {
1135 int res;
1136
1137 snd_pcm_stream_lock_irq(substream);
1138 res = snd_pcm_action(ops, substream, state);
1139 snd_pcm_stream_unlock_irq(substream);
1140 return res;
1141 }
1142
1143 /*
1144 */
1145 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1146 struct snd_pcm_substream *substream,
1147 int state)
1148 {
1149 int res;
1150
1151 down_read(&snd_pcm_link_rwsem);
1152 if (snd_pcm_stream_linked(substream))
1153 res = snd_pcm_action_group(ops, substream, state, 0);
1154 else
1155 res = snd_pcm_action_single(ops, substream, state);
1156 up_read(&snd_pcm_link_rwsem);
1157 return res;
1158 }
1159
1160 /*
1161 * start callbacks
1162 */
1163 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1164 {
1165 struct snd_pcm_runtime *runtime = substream->runtime;
1166 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1167 return -EBADFD;
1168 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1169 !snd_pcm_playback_data(substream))
1170 return -EPIPE;
1171 runtime->trigger_tstamp_latched = false;
1172 runtime->trigger_master = substream;
1173 return 0;
1174 }
1175
1176 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1177 {
1178 if (substream->runtime->trigger_master != substream)
1179 return 0;
1180 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1181 }
1182
1183 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1184 {
1185 if (substream->runtime->trigger_master == substream)
1186 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1187 }
1188
1189 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1190 {
1191 struct snd_pcm_runtime *runtime = substream->runtime;
1192 snd_pcm_trigger_tstamp(substream);
1193 runtime->hw_ptr_jiffies = jiffies;
1194 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1195 runtime->rate;
1196 runtime->status->state = state;
1197 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1198 runtime->silence_size > 0)
1199 snd_pcm_playback_silence(substream, ULONG_MAX);
1200 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1201 }
1202
1203 static const struct action_ops snd_pcm_action_start = {
1204 .pre_action = snd_pcm_pre_start,
1205 .do_action = snd_pcm_do_start,
1206 .undo_action = snd_pcm_undo_start,
1207 .post_action = snd_pcm_post_start
1208 };
1209
1210 /**
1211 * snd_pcm_start - start all linked streams
1212 * @substream: the PCM substream instance
1213 *
1214 * Return: Zero if successful, or a negative error code.
1215 * The stream lock must be acquired before calling this function.
1216 */
1217 int snd_pcm_start(struct snd_pcm_substream *substream)
1218 {
1219 return snd_pcm_action(&snd_pcm_action_start, substream,
1220 SNDRV_PCM_STATE_RUNNING);
1221 }
1222
1223 /* take the stream lock and start the streams */
1224 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1225 {
1226 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1227 SNDRV_PCM_STATE_RUNNING);
1228 }
1229
1230 /*
1231 * stop callbacks
1232 */
1233 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1234 {
1235 struct snd_pcm_runtime *runtime = substream->runtime;
1236 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1237 return -EBADFD;
1238 runtime->trigger_master = substream;
1239 return 0;
1240 }
1241
1242 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1243 {
1244 if (substream->runtime->trigger_master == substream &&
1245 snd_pcm_running(substream))
1246 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1247 return 0; /* unconditionally stop all substreams */
1248 }
1249
1250 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1251 {
1252 struct snd_pcm_runtime *runtime = substream->runtime;
1253 if (runtime->status->state != state) {
1254 snd_pcm_trigger_tstamp(substream);
1255 runtime->status->state = state;
1256 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1257 }
1258 wake_up(&runtime->sleep);
1259 wake_up(&runtime->tsleep);
1260 }
1261
1262 static const struct action_ops snd_pcm_action_stop = {
1263 .pre_action = snd_pcm_pre_stop,
1264 .do_action = snd_pcm_do_stop,
1265 .post_action = snd_pcm_post_stop
1266 };
1267
1268 /**
1269 * snd_pcm_stop - try to stop all running streams in the substream group
1270 * @substream: the PCM substream instance
1271 * @state: PCM state after stopping the stream
1272 *
1273 * The state of each stream is then changed to the given state unconditionally.
1274 *
1275 * Return: Zero if successful, or a negative error code.
1276 */
1277 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1278 {
1279 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1280 }
1281 EXPORT_SYMBOL(snd_pcm_stop);
1282
1283 /**
1284 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1285 * @substream: the PCM substream
1286 *
1287 * After stopping, the state is changed to SETUP.
1288 * Unlike snd_pcm_stop(), this affects only the given stream.
1289 *
1290 * Return: Zero if successful, or a negative error code.
1291 */
1292 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1293 {
1294 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1295 SNDRV_PCM_STATE_SETUP);
1296 }
1297
1298 /**
1299 * snd_pcm_stop_xrun - stop the running streams as XRUN
1300 * @substream: the PCM substream instance
1301 *
1302 * This stops the given running substream (and all linked substreams) as XRUN.
1303 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1304 *
1305 * Return: Zero if successful, or a negative error code.
1306 */
1307 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1308 {
1309 unsigned long flags;
1310 int ret = 0;
1311
1312 snd_pcm_stream_lock_irqsave(substream, flags);
1313 if (snd_pcm_running(substream))
1314 ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1315 snd_pcm_stream_unlock_irqrestore(substream, flags);
1316 return ret;
1317 }
1318 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1319
1320 /*
1321 * pause callbacks
1322 */
1323 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1324 {
1325 struct snd_pcm_runtime *runtime = substream->runtime;
1326 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1327 return -ENOSYS;
1328 if (push) {
1329 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1330 return -EBADFD;
1331 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1332 return -EBADFD;
1333 runtime->trigger_master = substream;
1334 return 0;
1335 }
1336
1337 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1338 {
1339 if (substream->runtime->trigger_master != substream)
1340 return 0;
1341 /* some drivers might use hw_ptr to recover from the pause -
1342 update the hw_ptr now */
1343 if (push)
1344 snd_pcm_update_hw_ptr(substream);
1345 /* The jiffies check in snd_pcm_update_hw_ptr*() is done via a delta
1346 * from the current jiffies; moving hw_ptr_jiffies far into the past
1347 * makes the delta large enough to effectively skip the check once.
1348 */
1349 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1350 return substream->ops->trigger(substream,
1351 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1352 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1353 }
1354
1355 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1356 {
1357 if (substream->runtime->trigger_master == substream)
1358 substream->ops->trigger(substream,
1359 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1360 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1361 }
1362
1363 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1364 {
1365 struct snd_pcm_runtime *runtime = substream->runtime;
1366 snd_pcm_trigger_tstamp(substream);
1367 if (push) {
1368 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1369 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1370 wake_up(&runtime->sleep);
1371 wake_up(&runtime->tsleep);
1372 } else {
1373 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1374 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1375 }
1376 }
1377
1378 static const struct action_ops snd_pcm_action_pause = {
1379 .pre_action = snd_pcm_pre_pause,
1380 .do_action = snd_pcm_do_pause,
1381 .undo_action = snd_pcm_undo_pause,
1382 .post_action = snd_pcm_post_pause
1383 };
1384
1385 /*
1386 * Push/release the pause for all linked streams.
1387 */
1388 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1389 {
1390 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1391 }
1392
1393 #ifdef CONFIG_PM
1394 /* suspend */
1395
1396 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1397 {
1398 struct snd_pcm_runtime *runtime = substream->runtime;
1399 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1400 return -EBUSY;
1401 runtime->trigger_master = substream;
1402 return 0;
1403 }
1404
1405 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1406 {
1407 struct snd_pcm_runtime *runtime = substream->runtime;
1408 if (runtime->trigger_master != substream)
1409 return 0;
1410 if (! snd_pcm_running(substream))
1411 return 0;
1412 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1413 return 0; /* suspend unconditionally */
1414 }
1415
1416 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1417 {
1418 struct snd_pcm_runtime *runtime = substream->runtime;
1419 snd_pcm_trigger_tstamp(substream);
1420 runtime->status->suspended_state = runtime->status->state;
1421 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1422 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1423 wake_up(&runtime->sleep);
1424 wake_up(&runtime->tsleep);
1425 }
1426
1427 static const struct action_ops snd_pcm_action_suspend = {
1428 .pre_action = snd_pcm_pre_suspend,
1429 .do_action = snd_pcm_do_suspend,
1430 .post_action = snd_pcm_post_suspend
1431 };
1432
1433 /**
1434 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1435 * @substream: the PCM substream
1436 *
1437 * After this call, all streams are changed to SUSPENDED state.
1438 *
1439 * Return: Zero if successful (or @substream is %NULL), or a negative error
1440 * code.
1441 */
1442 int snd_pcm_suspend(struct snd_pcm_substream *substream)
1443 {
1444 int err;
1445 unsigned long flags;
1446
1447 if (! substream)
1448 return 0;
1449
1450 snd_pcm_stream_lock_irqsave(substream, flags);
1451 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1452 snd_pcm_stream_unlock_irqrestore(substream, flags);
1453 return err;
1454 }
1455 EXPORT_SYMBOL(snd_pcm_suspend);
1456
1457 /**
1458 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1459 * @pcm: the PCM instance
1460 *
1461 * After this call, all streams are changed to SUSPENDED state.
1462 *
1463 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1464 */
1465 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1466 {
1467 struct snd_pcm_substream *substream;
1468 int stream, err = 0;
1469
1470 if (! pcm)
1471 return 0;
1472
1473 for (stream = 0; stream < 2; stream++) {
1474 for (substream = pcm->streams[stream].substream;
1475 substream; substream = substream->next) {
1476 /* FIXME: the open/close code should lock this as well */
1477 if (substream->runtime == NULL)
1478 continue;
1479 err = snd_pcm_suspend(substream);
1480 if (err < 0 && err != -EBUSY)
1481 return err;
1482 }
1483 }
1484 return 0;
1485 }
1486 EXPORT_SYMBOL(snd_pcm_suspend_all);
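
/*
 * Illustrative sketch (assumption, not part of this file): a typical driver
 * calls snd_pcm_suspend_all() from its PM suspend callback before saving the
 * controller state; the chip/pcm member names below are hypothetical.
 *
 *	static int my_driver_suspend(struct device *dev)
 *	{
 *		struct my_chip *chip = dev_get_drvdata(dev);
 *
 *		snd_pcm_suspend_all(chip->pcm);
 *		... save controller registers, stop clocks, etc. ...
 *		return 0;
 *	}
 */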
1487
1488 /* resume */
1489
1490 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1491 {
1492 struct snd_pcm_runtime *runtime = substream->runtime;
1493 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1494 return -ENOSYS;
1495 runtime->trigger_master = substream;
1496 return 0;
1497 }
1498
1499 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1500 {
1501 struct snd_pcm_runtime *runtime = substream->runtime;
1502 if (runtime->trigger_master != substream)
1503 return 0;
1504 /* DMA not running previously? */
1505 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1506 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1507 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1508 return 0;
1509 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1510 }
1511
1512 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1513 {
1514 if (substream->runtime->trigger_master == substream &&
1515 snd_pcm_running(substream))
1516 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1517 }
1518
1519 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1520 {
1521 struct snd_pcm_runtime *runtime = substream->runtime;
1522 snd_pcm_trigger_tstamp(substream);
1523 runtime->status->state = runtime->status->suspended_state;
1524 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1525 }
1526
1527 static const struct action_ops snd_pcm_action_resume = {
1528 .pre_action = snd_pcm_pre_resume,
1529 .do_action = snd_pcm_do_resume,
1530 .undo_action = snd_pcm_undo_resume,
1531 .post_action = snd_pcm_post_resume
1532 };
1533
1534 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1535 {
1536 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1537 }
1538
1539 #else
1540
1541 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1542 {
1543 return -ENOSYS;
1544 }
1545
1546 #endif /* CONFIG_PM */
1547
1548 /*
1549 * xrun ioctl
1550 *
1551 * Change the RUNNING stream(s) to XRUN state.
1552 */
1553 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1554 {
1555 struct snd_pcm_runtime *runtime = substream->runtime;
1556 int result;
1557
1558 snd_pcm_stream_lock_irq(substream);
1559 switch (runtime->status->state) {
1560 case SNDRV_PCM_STATE_XRUN:
1561 result = 0; /* already there */
1562 break;
1563 case SNDRV_PCM_STATE_RUNNING:
1564 result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1565 break;
1566 default:
1567 result = -EBADFD;
1568 }
1569 snd_pcm_stream_unlock_irq(substream);
1570 return result;
1571 }
1572
1573 /*
1574 * reset ioctl
1575 */
1576 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1577 {
1578 struct snd_pcm_runtime *runtime = substream->runtime;
1579 switch (runtime->status->state) {
1580 case SNDRV_PCM_STATE_RUNNING:
1581 case SNDRV_PCM_STATE_PREPARED:
1582 case SNDRV_PCM_STATE_PAUSED:
1583 case SNDRV_PCM_STATE_SUSPENDED:
1584 return 0;
1585 default:
1586 return -EBADFD;
1587 }
1588 }
1589
1590 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1591 {
1592 struct snd_pcm_runtime *runtime = substream->runtime;
1593 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1594 if (err < 0)
1595 return err;
1596 runtime->hw_ptr_base = 0;
1597 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1598 runtime->status->hw_ptr % runtime->period_size;
1599 runtime->silence_start = runtime->status->hw_ptr;
1600 runtime->silence_filled = 0;
1601 return 0;
1602 }
1603
1604 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1605 {
1606 struct snd_pcm_runtime *runtime = substream->runtime;
1607 runtime->control->appl_ptr = runtime->status->hw_ptr;
1608 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1609 runtime->silence_size > 0)
1610 snd_pcm_playback_silence(substream, ULONG_MAX);
1611 }
1612
1613 static const struct action_ops snd_pcm_action_reset = {
1614 .pre_action = snd_pcm_pre_reset,
1615 .do_action = snd_pcm_do_reset,
1616 .post_action = snd_pcm_post_reset
1617 };
1618
1619 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1620 {
1621 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1622 }
1623
1624 /*
1625 * prepare ioctl
1626 */
1627 /* we use the second argument for updating f_flags */
1628 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1629 int f_flags)
1630 {
1631 struct snd_pcm_runtime *runtime = substream->runtime;
1632 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1633 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1634 return -EBADFD;
1635 if (snd_pcm_running(substream))
1636 return -EBUSY;
1637 substream->f_flags = f_flags;
1638 return 0;
1639 }
1640
1641 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1642 {
1643 int err;
1644 err = substream->ops->prepare(substream);
1645 if (err < 0)
1646 return err;
1647 return snd_pcm_do_reset(substream, 0);
1648 }
1649
1650 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1651 {
1652 struct snd_pcm_runtime *runtime = substream->runtime;
1653 runtime->control->appl_ptr = runtime->status->hw_ptr;
1654 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1655 }
1656
1657 static const struct action_ops snd_pcm_action_prepare = {
1658 .pre_action = snd_pcm_pre_prepare,
1659 .do_action = snd_pcm_do_prepare,
1660 .post_action = snd_pcm_post_prepare
1661 };
1662
1663 /**
1664 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1665 * @substream: the PCM substream instance
1666 * @file: file to refer f_flags
1667 *
1668 * Return: Zero if successful, or a negative error code.
1669 */
1670 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1671 struct file *file)
1672 {
1673 int f_flags;
1674
1675 if (file)
1676 f_flags = file->f_flags;
1677 else
1678 f_flags = substream->f_flags;
1679
1680 snd_pcm_stream_lock_irq(substream);
1681 switch (substream->runtime->status->state) {
1682 case SNDRV_PCM_STATE_PAUSED:
1683 snd_pcm_pause(substream, 0);
1684 /* fallthru */
1685 case SNDRV_PCM_STATE_SUSPENDED:
1686 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1687 break;
1688 }
1689 snd_pcm_stream_unlock_irq(substream);
1690
1691 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1692 substream, f_flags);
1693 }
1694
1695 /*
1696 * drain ioctl
1697 */
1698
1699 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1700 {
1701 struct snd_pcm_runtime *runtime = substream->runtime;
1702 switch (runtime->status->state) {
1703 case SNDRV_PCM_STATE_OPEN:
1704 case SNDRV_PCM_STATE_DISCONNECTED:
1705 case SNDRV_PCM_STATE_SUSPENDED:
1706 return -EBADFD;
1707 }
1708 runtime->trigger_master = substream;
1709 return 0;
1710 }
1711
1712 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1713 {
1714 struct snd_pcm_runtime *runtime = substream->runtime;
1715 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1716 switch (runtime->status->state) {
1717 case SNDRV_PCM_STATE_PREPARED:
1718 /* start playback stream if possible */
1719 if (! snd_pcm_playback_empty(substream)) {
1720 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1721 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1722 } else {
1723 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1724 }
1725 break;
1726 case SNDRV_PCM_STATE_RUNNING:
1727 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1728 break;
1729 case SNDRV_PCM_STATE_XRUN:
1730 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1731 break;
1732 default:
1733 break;
1734 }
1735 } else {
1736 /* stop running stream */
1737 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1738 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1739 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1740 snd_pcm_do_stop(substream, new_state);
1741 snd_pcm_post_stop(substream, new_state);
1742 }
1743 }
1744
1745 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1746 runtime->trigger_master == substream &&
1747 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1748 return substream->ops->trigger(substream,
1749 SNDRV_PCM_TRIGGER_DRAIN);
1750
1751 return 0;
1752 }
1753
1754 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1755 {
1756 }
1757
1758 static const struct action_ops snd_pcm_action_drain_init = {
1759 .pre_action = snd_pcm_pre_drain_init,
1760 .do_action = snd_pcm_do_drain_init,
1761 .post_action = snd_pcm_post_drain_init
1762 };
1763
1764 static int snd_pcm_drop(struct snd_pcm_substream *substream);
1765
1766 /*
1767 * Drain the stream(s).
1768 * When the substream is linked, wait until the draining of all playback
1769 * streams is finished.
1770 * After this call, all streams are supposed to be in either the SETUP or
1771 * the DRAINING (capture only) state.
1772 */
1773 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1774 struct file *file)
1775 {
1776 struct snd_card *card;
1777 struct snd_pcm_runtime *runtime;
1778 struct snd_pcm_substream *s;
1779 wait_queue_t wait;
1780 int result = 0;
1781 int nonblock = 0;
1782
1783 card = substream->pcm->card;
1784 runtime = substream->runtime;
1785
1786 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1787 return -EBADFD;
1788
1789 if (file) {
1790 if (file->f_flags & O_NONBLOCK)
1791 nonblock = 1;
1792 } else if (substream->f_flags & O_NONBLOCK)
1793 nonblock = 1;
1794
1795 down_read(&snd_pcm_link_rwsem);
1796 snd_pcm_stream_lock_irq(substream);
1797 /* resume pause */
1798 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1799 snd_pcm_pause(substream, 0);
1800
1801 /* pre-start/stop - all running streams are changed to DRAINING state */
1802 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1803 if (result < 0)
1804 goto unlock;
1805 /* in non-blocking, we don't wait in ioctl but let caller poll */
1806 if (nonblock) {
1807 result = -EAGAIN;
1808 goto unlock;
1809 }
1810
1811 for (;;) {
1812 long tout;
1813 struct snd_pcm_runtime *to_check;
1814 if (signal_pending(current)) {
1815 result = -ERESTARTSYS;
1816 break;
1817 }
1818 /* find a substream to drain */
1819 to_check = NULL;
1820 snd_pcm_group_for_each_entry(s, substream) {
1821 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1822 continue;
1823 runtime = s->runtime;
1824 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1825 to_check = runtime;
1826 break;
1827 }
1828 }
1829 if (!to_check)
1830 break; /* all drained */
1831 init_waitqueue_entry(&wait, current);
1832 add_wait_queue(&to_check->sleep, &wait);
1833 snd_pcm_stream_unlock_irq(substream);
1834 up_read(&snd_pcm_link_rwsem);
1835 snd_power_unlock(card);
1836 if (runtime->no_period_wakeup)
1837 tout = MAX_SCHEDULE_TIMEOUT;
1838 else {
1839 tout = 10;
1840 if (runtime->rate) {
1841 long t = runtime->period_size * 2 / runtime->rate;
1842 tout = max(t, tout);
1843 }
1844 tout = msecs_to_jiffies(tout * 1000);
1845 }
1846 tout = schedule_timeout_interruptible(tout);
1847 snd_power_lock(card);
1848 down_read(&snd_pcm_link_rwsem);
1849 snd_pcm_stream_lock_irq(substream);
1850 remove_wait_queue(&to_check->sleep, &wait);
1851 if (card->shutdown) {
1852 result = -ENODEV;
1853 break;
1854 }
1855 if (tout == 0) {
1856 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1857 result = -ESTRPIPE;
1858 else {
1859 dev_dbg(substream->pcm->card->dev,
1860 "playback drain error (DMA or IRQ trouble?)\n");
1861 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1862 result = -EIO;
1863 }
1864 break;
1865 }
1866 }
1867
1868 unlock:
1869 snd_pcm_stream_unlock_irq(substream);
1870 up_read(&snd_pcm_link_rwsem);
1871
1872 return result;
1873 }
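/*
 * A minimal user-space sketch of how the drain path above is typically
 * driven; it assumes an already-opened and configured playback PCM fd
 * (error handling trimmed), and contrasts DRAIN with the DROP ioctl
 * handled by snd_pcm_drop() below:
 *
 *	#include <sys/ioctl.h>
 *	#include <sound/asound.h>
 *
 *	static int finish_playback(int fd, int discard)
 *	{
 *		if (discard)
 *			// discard queued samples, back to SETUP immediately
 *			return ioctl(fd, SNDRV_PCM_IOCTL_DROP);
 *		// block until all queued samples are played;
 *		// on an O_NONBLOCK fd this returns -1 with errno EAGAIN
 *		return ioctl(fd, SNDRV_PCM_IOCTL_DRAIN);
 *	}
 */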
1874
1875 /*
1876 * drop ioctl
1877 *
1878 * Immediately put all linked substreams into SETUP state.
1879 */
1880 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1881 {
1882 struct snd_pcm_runtime *runtime;
1883 int result = 0;
1884
1885 if (PCM_RUNTIME_CHECK(substream))
1886 return -ENXIO;
1887 runtime = substream->runtime;
1888
1889 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1890 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1891 return -EBADFD;
1892
1893 snd_pcm_stream_lock_irq(substream);
1894 /* resume pause */
1895 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1896 snd_pcm_pause(substream, 0);
1897
1898 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1899 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1900 snd_pcm_stream_unlock_irq(substream);
1901
1902 return result;
1903 }
1904
1905
1906 static bool is_pcm_file(struct file *file)
1907 {
1908 struct inode *inode = file_inode(file);
1909 unsigned int minor;
1910
1911 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1912 return false;
1913 minor = iminor(inode);
1914 return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
1915 snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1916 }
1917
1918 /*
1919 * PCM link handling
1920 */
1921 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1922 {
1923 int res = 0;
1924 struct snd_pcm_file *pcm_file;
1925 struct snd_pcm_substream *substream1;
1926 struct snd_pcm_group *group;
1927 struct fd f = fdget(fd);
1928
1929 if (!f.file)
1930 return -EBADFD;
1931 if (!is_pcm_file(f.file)) {
1932 res = -EBADFD;
1933 goto _badf;
1934 }
1935 pcm_file = f.file->private_data;
1936 substream1 = pcm_file->substream;
1937 group = kmalloc(sizeof(*group), GFP_KERNEL);
1938 if (!group) {
1939 res = -ENOMEM;
1940 goto _nolock;
1941 }
1942 down_write_nonblock(&snd_pcm_link_rwsem);
1943 write_lock_irq(&snd_pcm_link_rwlock);
1944 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1945 substream->runtime->status->state != substream1->runtime->status->state ||
1946 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
1947 res = -EBADFD;
1948 goto _end;
1949 }
1950 if (snd_pcm_stream_linked(substream1)) {
1951 res = -EALREADY;
1952 goto _end;
1953 }
1954 if (!snd_pcm_stream_linked(substream)) {
1955 substream->group = group;
1956 group = NULL;
1957 spin_lock_init(&substream->group->lock);
1958 mutex_init(&substream->group->mutex);
1959 INIT_LIST_HEAD(&substream->group->substreams);
1960 list_add_tail(&substream->link_list, &substream->group->substreams);
1961 substream->group->count = 1;
1962 }
1963 list_add_tail(&substream1->link_list, &substream->group->substreams);
1964 substream->group->count++;
1965 substream1->group = substream->group;
1966 _end:
1967 write_unlock_irq(&snd_pcm_link_rwlock);
1968 up_write(&snd_pcm_link_rwsem);
1969 _nolock:
1970 snd_card_unref(substream1->pcm->card);
1971 kfree(group);
1972 _badf:
1973 fdput(f);
1974 return res;
1975 }
1976
1977 static void relink_to_local(struct snd_pcm_substream *substream)
1978 {
1979 substream->group = &substream->self_group;
1980 INIT_LIST_HEAD(&substream->self_group.substreams);
1981 list_add_tail(&substream->link_list, &substream->self_group.substreams);
1982 }
1983
1984 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1985 {
1986 struct snd_pcm_substream *s;
1987 int res = 0;
1988
1989 down_write_nonblock(&snd_pcm_link_rwsem);
1990 write_lock_irq(&snd_pcm_link_rwlock);
1991 if (!snd_pcm_stream_linked(substream)) {
1992 res = -EALREADY;
1993 goto _end;
1994 }
1995 list_del(&substream->link_list);
1996 substream->group->count--;
1997 if (substream->group->count == 1) { /* detach the last stream, too */
1998 snd_pcm_group_for_each_entry(s, substream) {
1999 relink_to_local(s);
2000 break;
2001 }
2002 kfree(substream->group);
2003 }
2004 relink_to_local(substream);
2005 _end:
2006 write_unlock_irq(&snd_pcm_link_rwlock);
2007 up_write(&snd_pcm_link_rwsem);
2008 return res;
2009 }
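/*
 * A minimal user-space sketch of the link/unlink ioctls handled above;
 * fd_a and fd_b are assumed to be two already-opened PCM fds in the same
 * state, as snd_pcm_link() requires:
 *
 *	#include <sys/ioctl.h>
 *	#include <sound/asound.h>
 *
 *	static int link_streams(int fd_a, int fd_b)
 *	{
 *		// the argument is the file descriptor of the other substream
 *		return ioctl(fd_a, SNDRV_PCM_IOCTL_LINK, fd_b);
 *	}
 *
 *	static int unlink_stream(int fd)
 *	{
 *		return ioctl(fd, SNDRV_PCM_IOCTL_UNLINK);
 *	}
 */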
2010
2011 /*
2012 * hw configurator
2013 */
2014 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2015 struct snd_pcm_hw_rule *rule)
2016 {
2017 struct snd_interval t;
2018 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2019 hw_param_interval_c(params, rule->deps[1]), &t);
2020 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2021 }
2022
2023 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2024 struct snd_pcm_hw_rule *rule)
2025 {
2026 struct snd_interval t;
2027 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2028 hw_param_interval_c(params, rule->deps[1]), &t);
2029 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2030 }
2031
2032 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2033 struct snd_pcm_hw_rule *rule)
2034 {
2035 struct snd_interval t;
2036 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2037 hw_param_interval_c(params, rule->deps[1]),
2038 (unsigned long) rule->private, &t);
2039 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2040 }
2041
2042 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2043 struct snd_pcm_hw_rule *rule)
2044 {
2045 struct snd_interval t;
2046 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2047 (unsigned long) rule->private,
2048 hw_param_interval_c(params, rule->deps[1]), &t);
2049 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2050 }
2051
2052 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2053 struct snd_pcm_hw_rule *rule)
2054 {
2055 unsigned int k;
2056 const struct snd_interval *i =
2057 hw_param_interval_c(params, rule->deps[0]);
2058 struct snd_mask m;
2059 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2060 snd_mask_any(&m);
2061 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2062 int bits;
2063 if (! snd_mask_test(mask, k))
2064 continue;
2065 bits = snd_pcm_format_physical_width(k);
2066 if (bits <= 0)
2067 continue; /* ignore invalid formats */
2068 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2069 snd_mask_reset(&m, k);
2070 }
2071 return snd_mask_refine(mask, &m);
2072 }
2073
2074 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2075 struct snd_pcm_hw_rule *rule)
2076 {
2077 struct snd_interval t;
2078 unsigned int k;
2079 t.min = UINT_MAX;
2080 t.max = 0;
2081 t.openmin = 0;
2082 t.openmax = 0;
2083 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2084 int bits;
2085 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2086 continue;
2087 bits = snd_pcm_format_physical_width(k);
2088 if (bits <= 0)
2089 continue; /* ignore invalid formats */
2090 if (t.min > (unsigned)bits)
2091 t.min = bits;
2092 if (t.max < (unsigned)bits)
2093 t.max = bits;
2094 }
2095 t.integer = 1;
2096 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2097 }
2098
2099 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2100 #error "Change this table"
2101 #endif
2102
2103 static const unsigned int rates[] = {
2104 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2105 48000, 64000, 88200, 96000, 176400, 192000
2106 };
2107
2108 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2109 .count = ARRAY_SIZE(rates),
2110 .list = rates,
2111 };
2112
2113 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2114 struct snd_pcm_hw_rule *rule)
2115 {
2116 struct snd_pcm_hardware *hw = rule->private;
2117 return snd_interval_list(hw_param_interval(params, rule->var),
2118 snd_pcm_known_rates.count,
2119 snd_pcm_known_rates.list, hw->rates);
2120 }
2121
2122 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2123 struct snd_pcm_hw_rule *rule)
2124 {
2125 struct snd_interval t;
2126 struct snd_pcm_substream *substream = rule->private;
2127 t.min = 0;
2128 t.max = substream->buffer_bytes_max;
2129 t.openmin = 0;
2130 t.openmax = 0;
2131 t.integer = 1;
2132 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2133 }
2134
2135 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2136 {
2137 struct snd_pcm_runtime *runtime = substream->runtime;
2138 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2139 int k, err;
2140
2141 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2142 snd_mask_any(constrs_mask(constrs, k));
2143 }
2144
2145 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2146 snd_interval_any(constrs_interval(constrs, k));
2147 }
2148
2149 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2150 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2151 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2152 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2153 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2154
2155 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2156 snd_pcm_hw_rule_format, NULL,
2157 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2158 if (err < 0)
2159 return err;
2160 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2161 snd_pcm_hw_rule_sample_bits, NULL,
2162 SNDRV_PCM_HW_PARAM_FORMAT,
2163 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2164 if (err < 0)
2165 return err;
2166 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2167 snd_pcm_hw_rule_div, NULL,
2168 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2169 if (err < 0)
2170 return err;
2171 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2172 snd_pcm_hw_rule_mul, NULL,
2173 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2174 if (err < 0)
2175 return err;
2176 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2177 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2178 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2179 if (err < 0)
2180 return err;
2181 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2182 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2183 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2184 if (err < 0)
2185 return err;
2186 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2187 snd_pcm_hw_rule_div, NULL,
2188 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2189 if (err < 0)
2190 return err;
2191 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2192 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2193 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2194 if (err < 0)
2195 return err;
2196 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2197 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2198 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2199 if (err < 0)
2200 return err;
2201 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2202 snd_pcm_hw_rule_div, NULL,
2203 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2204 if (err < 0)
2205 return err;
2206 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2207 snd_pcm_hw_rule_div, NULL,
2208 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2209 if (err < 0)
2210 return err;
2211 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2212 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2213 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2214 if (err < 0)
2215 return err;
2216 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2217 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2218 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2219 if (err < 0)
2220 return err;
2221 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2222 snd_pcm_hw_rule_mul, NULL,
2223 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2224 if (err < 0)
2225 return err;
2226 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2227 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2228 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2229 if (err < 0)
2230 return err;
2231 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2232 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2233 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2234 if (err < 0)
2235 return err;
2236 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2237 snd_pcm_hw_rule_muldivk, (void*) 8,
2238 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2239 if (err < 0)
2240 return err;
2241 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2242 snd_pcm_hw_rule_muldivk, (void*) 8,
2243 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2244 if (err < 0)
2245 return err;
2246 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2247 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2248 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2249 if (err < 0)
2250 return err;
2251 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2252 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2253 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2254 if (err < 0)
2255 return err;
2256 return 0;
2257 }
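/*
 * The calls above install the generic dependency rules between hw_params
 * fields. A lowlevel driver can add its own constraint the same way from
 * its .open callback; a hypothetical sketch (driver names are made up):
 *
 *	static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					     struct snd_pcm_hw_rule *rule)
 *	{
 *		const struct snd_interval *rate =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		// imaginary hardware: stereo only above 96 kHz
 *		if (rate->min > 96000)
 *			t.min = 2;
 *		return snd_interval_refine(hw_param_interval(params, rule->var),
 *					   &t);
 *	}
 *
 *	// from the driver's .open callback:
 *	//	snd_pcm_hw_rule_add(substream->runtime, 0,
 *	//			    SNDRV_PCM_HW_PARAM_CHANNELS,
 *	//			    foo_rule_channels_by_rate, NULL,
 *	//			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */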
2258
2259 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2260 {
2261 struct snd_pcm_runtime *runtime = substream->runtime;
2262 struct snd_pcm_hardware *hw = &runtime->hw;
2263 int err;
2264 unsigned int mask = 0;
2265
2266 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2267 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2268 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2269 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2270 if (hw_support_mmap(substream)) {
2271 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2272 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2273 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2274 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2275 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2276 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2277 }
2278 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2279 if (err < 0)
2280 return err;
2281
2282 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2283 if (err < 0)
2284 return err;
2285
2286 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2287 if (err < 0)
2288 return err;
2289
2290 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2291 hw->channels_min, hw->channels_max);
2292 if (err < 0)
2293 return err;
2294
2295 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2296 hw->rate_min, hw->rate_max);
2297 if (err < 0)
2298 return err;
2299
2300 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2301 hw->period_bytes_min, hw->period_bytes_max);
2302 if (err < 0)
2303 return err;
2304
2305 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2306 hw->periods_min, hw->periods_max);
2307 if (err < 0)
2308 return err;
2309
2310 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2311 hw->period_bytes_min, hw->buffer_bytes_max);
2312 if (err < 0)
2313 return err;
2314
2315 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2316 snd_pcm_hw_rule_buffer_bytes_max, substream,
2317 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2318 if (err < 0)
2319 return err;
2320
2321 /* FIXME: remove */
2322 if (runtime->dma_bytes) {
2323 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2324 if (err < 0)
2325 return err;
2326 }
2327
2328 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2329 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2330 snd_pcm_hw_rule_rate, hw,
2331 SNDRV_PCM_HW_PARAM_RATE, -1);
2332 if (err < 0)
2333 return err;
2334 }
2335
2336 /* FIXME: this belongs to the lowlevel driver */
2337 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2338
2339 return 0;
2340 }
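/*
 * The constraints above are seeded from runtime->hw, which the lowlevel
 * driver fills in its .open callback before this function runs; a hedged
 * example of such a descriptor (all values illustrative only):
 *
 *	static const struct snd_pcm_hardware foo_pcm_hardware = {
 *		.info			= SNDRV_PCM_INFO_MMAP |
 *					  SNDRV_PCM_INFO_MMAP_VALID |
 *					  SNDRV_PCM_INFO_INTERLEAVED,
 *		.formats		= SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates			= SNDRV_PCM_RATE_44100 |
 *					  SNDRV_PCM_RATE_48000,
 *		.rate_min		= 44100,
 *		.rate_max		= 48000,
 *		.channels_min		= 2,
 *		.channels_max		= 2,
 *		.buffer_bytes_max	= 64 * 1024,
 *		.period_bytes_min	= 256,
 *		.period_bytes_max	= 16 * 1024,
 *		.periods_min		= 2,
 *		.periods_max		= 256,
 *	};
 *
 *	// in the driver's .open callback:
 *	//	substream->runtime->hw = foo_pcm_hardware;
 */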
2341
2342 static void pcm_release_private(struct snd_pcm_substream *substream)
2343 {
2344 snd_pcm_unlink(substream);
2345 }
2346
2347 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2348 {
2349 substream->ref_count--;
2350 if (substream->ref_count > 0)
2351 return;
2352
2353 snd_pcm_drop(substream);
2354 if (substream->hw_opened) {
2355 if (substream->ops->hw_free &&
2356 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2357 substream->ops->hw_free(substream);
2358 substream->ops->close(substream);
2359 substream->hw_opened = 0;
2360 }
2361 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2362 pm_qos_remove_request(&substream->latency_pm_qos_req);
2363 if (substream->pcm_release) {
2364 substream->pcm_release(substream);
2365 substream->pcm_release = NULL;
2366 }
2367 snd_pcm_detach_substream(substream);
2368 }
2369 EXPORT_SYMBOL(snd_pcm_release_substream);
2370
2371 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2372 struct file *file,
2373 struct snd_pcm_substream **rsubstream)
2374 {
2375 struct snd_pcm_substream *substream;
2376 int err;
2377
2378 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2379 if (err < 0)
2380 return err;
2381 if (substream->ref_count > 1) {
2382 *rsubstream = substream;
2383 return 0;
2384 }
2385
2386 err = snd_pcm_hw_constraints_init(substream);
2387 if (err < 0) {
2388 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2389 goto error;
2390 }
2391
2392 if ((err = substream->ops->open(substream)) < 0)
2393 goto error;
2394
2395 substream->hw_opened = 1;
2396
2397 err = snd_pcm_hw_constraints_complete(substream);
2398 if (err < 0) {
2399 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2400 goto error;
2401 }
2402
2403 *rsubstream = substream;
2404 return 0;
2405
2406 error:
2407 snd_pcm_release_substream(substream);
2408 return err;
2409 }
2410 EXPORT_SYMBOL(snd_pcm_open_substream);
2411
2412 static int snd_pcm_open_file(struct file *file,
2413 struct snd_pcm *pcm,
2414 int stream)
2415 {
2416 struct snd_pcm_file *pcm_file;
2417 struct snd_pcm_substream *substream;
2418 int err;
2419
2420 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2421 if (err < 0)
2422 return err;
2423
2424 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2425 if (pcm_file == NULL) {
2426 snd_pcm_release_substream(substream);
2427 return -ENOMEM;
2428 }
2429 pcm_file->substream = substream;
2430 if (substream->ref_count == 1) {
2431 substream->file = pcm_file;
2432 substream->pcm_release = pcm_release_private;
2433 }
2434 file->private_data = pcm_file;
2435
2436 return 0;
2437 }
2438
2439 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2440 {
2441 struct snd_pcm *pcm;
2442 int err = nonseekable_open(inode, file);
2443 if (err < 0)
2444 return err;
2445 pcm = snd_lookup_minor_data(iminor(inode),
2446 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2447 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2448 if (pcm)
2449 snd_card_unref(pcm->card);
2450 return err;
2451 }
2452
2453 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2454 {
2455 struct snd_pcm *pcm;
2456 int err = nonseekable_open(inode, file);
2457 if (err < 0)
2458 return err;
2459 pcm = snd_lookup_minor_data(iminor(inode),
2460 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2461 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2462 if (pcm)
2463 snd_card_unref(pcm->card);
2464 return err;
2465 }
2466
2467 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2468 {
2469 int err;
2470 wait_queue_t wait;
2471
2472 if (pcm == NULL) {
2473 err = -ENODEV;
2474 goto __error1;
2475 }
2476 err = snd_card_file_add(pcm->card, file);
2477 if (err < 0)
2478 goto __error1;
2479 if (!try_module_get(pcm->card->module)) {
2480 err = -EFAULT;
2481 goto __error2;
2482 }
2483 init_waitqueue_entry(&wait, current);
2484 add_wait_queue(&pcm->open_wait, &wait);
2485 mutex_lock(&pcm->open_mutex);
2486 while (1) {
2487 err = snd_pcm_open_file(file, pcm, stream);
2488 if (err >= 0)
2489 break;
2490 if (err == -EAGAIN) {
2491 if (file->f_flags & O_NONBLOCK) {
2492 err = -EBUSY;
2493 break;
2494 }
2495 } else
2496 break;
2497 set_current_state(TASK_INTERRUPTIBLE);
2498 mutex_unlock(&pcm->open_mutex);
2499 schedule();
2500 mutex_lock(&pcm->open_mutex);
2501 if (pcm->card->shutdown) {
2502 err = -ENODEV;
2503 break;
2504 }
2505 if (signal_pending(current)) {
2506 err = -ERESTARTSYS;
2507 break;
2508 }
2509 }
2510 remove_wait_queue(&pcm->open_wait, &wait);
2511 mutex_unlock(&pcm->open_mutex);
2512 if (err < 0)
2513 goto __error;
2514 return err;
2515
2516 __error:
2517 module_put(pcm->card->module);
2518 __error2:
2519 snd_card_file_remove(pcm->card, file);
2520 __error1:
2521 return err;
2522 }
2523
2524 static int snd_pcm_release(struct inode *inode, struct file *file)
2525 {
2526 struct snd_pcm *pcm;
2527 struct snd_pcm_substream *substream;
2528 struct snd_pcm_file *pcm_file;
2529
2530 pcm_file = file->private_data;
2531 substream = pcm_file->substream;
2532 if (snd_BUG_ON(!substream))
2533 return -ENXIO;
2534 pcm = substream->pcm;
2535 mutex_lock(&pcm->open_mutex);
2536 snd_pcm_release_substream(substream);
2537 kfree(pcm_file);
2538 mutex_unlock(&pcm->open_mutex);
2539 wake_up(&pcm->open_wait);
2540 module_put(pcm->card->module);
2541 snd_card_file_remove(pcm->card, file);
2542 return 0;
2543 }
2544
2545 /* check and update PCM state; return 0 or a negative error
2546 * call this inside the PCM stream lock
2547 */
2548 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2549 {
2550 switch (substream->runtime->status->state) {
2551 case SNDRV_PCM_STATE_DRAINING:
2552 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2553 return -EBADFD;
2554 /* Fall through */
2555 case SNDRV_PCM_STATE_RUNNING:
2556 return snd_pcm_update_hw_ptr(substream);
2557 case SNDRV_PCM_STATE_PREPARED:
2558 case SNDRV_PCM_STATE_PAUSED:
2559 return 0;
2560 case SNDRV_PCM_STATE_SUSPENDED:
2561 return -ESTRPIPE;
2562 case SNDRV_PCM_STATE_XRUN:
2563 return -EPIPE;
2564 default:
2565 return -EBADFD;
2566 }
2567 }
2568
2569 /* increase the appl_ptr; returns the processed frames or a negative error */
2570 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2571 snd_pcm_uframes_t frames,
2572 snd_pcm_sframes_t avail)
2573 {
2574 struct snd_pcm_runtime *runtime = substream->runtime;
2575 snd_pcm_sframes_t appl_ptr;
2576 int ret;
2577
2578 if (avail <= 0)
2579 return 0;
2580 if (frames > (snd_pcm_uframes_t)avail)
2581 frames = avail;
2582 appl_ptr = runtime->control->appl_ptr + frames;
2583 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2584 appl_ptr -= runtime->boundary;
2585 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2586 return ret < 0 ? ret : frames;
2587 }
2588
2589 /* decrease the appl_ptr; returns the processed frames or a negative error */
2590 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2591 snd_pcm_uframes_t frames,
2592 snd_pcm_sframes_t avail)
2593 {
2594 struct snd_pcm_runtime *runtime = substream->runtime;
2595 snd_pcm_sframes_t appl_ptr;
2596 int ret;
2597
2598 if (avail <= 0)
2599 return 0;
2600 if (frames > (snd_pcm_uframes_t)avail)
2601 frames = avail;
2602 appl_ptr = runtime->control->appl_ptr - frames;
2603 if (appl_ptr < 0)
2604 appl_ptr += runtime->boundary;
2605 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2606 return ret < 0 ? ret : frames;
2607 }
2608
2609 static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
2610 snd_pcm_uframes_t frames)
2611 {
2612 struct snd_pcm_runtime *runtime = substream->runtime;
2613 snd_pcm_sframes_t ret;
2614
2615 if (frames == 0)
2616 return 0;
2617
2618 snd_pcm_stream_lock_irq(substream);
2619 ret = do_pcm_hwsync(substream);
2620 if (!ret)
2621 ret = rewind_appl_ptr(substream, frames,
2622 snd_pcm_playback_hw_avail(runtime));
2623 snd_pcm_stream_unlock_irq(substream);
2624 return ret;
2625 }
2626
2627 static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
2628 snd_pcm_uframes_t frames)
2629 {
2630 struct snd_pcm_runtime *runtime = substream->runtime;
2631 snd_pcm_sframes_t ret;
2632
2633 if (frames == 0)
2634 return 0;
2635
2636 snd_pcm_stream_lock_irq(substream);
2637 ret = do_pcm_hwsync(substream);
2638 if (!ret)
2639 ret = rewind_appl_ptr(substream, frames,
2640 snd_pcm_capture_hw_avail(runtime));
2641 snd_pcm_stream_unlock_irq(substream);
2642 return ret;
2643 }
2644
2645 static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
2646 snd_pcm_uframes_t frames)
2647 {
2648 struct snd_pcm_runtime *runtime = substream->runtime;
2649 snd_pcm_sframes_t ret;
2650
2651 if (frames == 0)
2652 return 0;
2653
2654 snd_pcm_stream_lock_irq(substream);
2655 ret = do_pcm_hwsync(substream);
2656 if (!ret)
2657 ret = forward_appl_ptr(substream, frames,
2658 snd_pcm_playback_avail(runtime));
2659 snd_pcm_stream_unlock_irq(substream);
2660 return ret;
2661 }
2662
2663 static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream,
2664 snd_pcm_uframes_t frames)
2665 {
2666 struct snd_pcm_runtime *runtime = substream->runtime;
2667 snd_pcm_sframes_t ret;
2668
2669 if (frames == 0)
2670 return 0;
2671
2672 snd_pcm_stream_lock_irq(substream);
2673 ret = do_pcm_hwsync(substream);
2674 if (!ret)
2675 ret = forward_appl_ptr(substream, frames,
2676 snd_pcm_capture_avail(runtime));
2677 snd_pcm_stream_unlock_irq(substream);
2678 return ret;
2679 }
2680
2681 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2682 {
2683 int err;
2684
2685 snd_pcm_stream_lock_irq(substream);
2686 err = do_pcm_hwsync(substream);
2687 snd_pcm_stream_unlock_irq(substream);
2688 return err;
2689 }
2690
2691 static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
2692 {
2693 struct snd_pcm_runtime *runtime = substream->runtime;
2694 int err;
2695 snd_pcm_sframes_t n = 0;
2696
2697 snd_pcm_stream_lock_irq(substream);
2698 err = do_pcm_hwsync(substream);
2699 if (!err) {
2700 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2701 n = snd_pcm_playback_hw_avail(runtime);
2702 else
2703 n = snd_pcm_capture_avail(runtime);
2704 n += runtime->delay;
2705 }
2706 snd_pcm_stream_unlock_irq(substream);
2707 return err < 0 ? err : n;
2708 }
2709
2710 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2711 struct snd_pcm_sync_ptr __user *_sync_ptr)
2712 {
2713 struct snd_pcm_runtime *runtime = substream->runtime;
2714 struct snd_pcm_sync_ptr sync_ptr;
2715 volatile struct snd_pcm_mmap_status *status;
2716 volatile struct snd_pcm_mmap_control *control;
2717 int err;
2718
2719 memset(&sync_ptr, 0, sizeof(sync_ptr));
2720 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2721 return -EFAULT;
2722 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2723 return -EFAULT;
2724 status = runtime->status;
2725 control = runtime->control;
2726 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2727 err = snd_pcm_hwsync(substream);
2728 if (err < 0)
2729 return err;
2730 }
2731 snd_pcm_stream_lock_irq(substream);
2732 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2733 err = pcm_lib_apply_appl_ptr(substream,
2734 sync_ptr.c.control.appl_ptr);
2735 if (err < 0) {
2736 snd_pcm_stream_unlock_irq(substream);
2737 return err;
2738 }
2739 } else {
2740 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2741 }
2742 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2743 control->avail_min = sync_ptr.c.control.avail_min;
2744 else
2745 sync_ptr.c.control.avail_min = control->avail_min;
2746 sync_ptr.s.status.state = status->state;
2747 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2748 sync_ptr.s.status.tstamp = status->tstamp;
2749 sync_ptr.s.status.suspended_state = status->suspended_state;
2750 snd_pcm_stream_unlock_irq(substream);
2751 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2752 return -EFAULT;
2753 return 0;
2754 }
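/*
 * A minimal user-space sketch of the SYNC_PTR protocol implemented above,
 * used when the status/control mmap is unavailable or disallowed; fd is
 * assumed to be an already-configured PCM fd:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sound/asound.h>
 *
 *	static int query_pointers(int fd, struct snd_pcm_sync_ptr *sp)
 *	{
 *		memset(sp, 0, sizeof(*sp));
 *		// with APPL and AVAIL_MIN set, the kernel's appl_ptr and
 *		// avail_min are read back instead of ours being applied;
 *		// HWSYNC additionally refreshes hw_ptr first
 *		sp->flags = SNDRV_PCM_SYNC_PTR_HWSYNC |
 *			    SNDRV_PCM_SYNC_PTR_APPL |
 *			    SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
 *		if (ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, sp) < 0)
 *			return -1;
 *		// sp->s.status.hw_ptr and sp->c.control.appl_ptr now hold
 *		// the kernel's view of the ring buffer pointers
 *		return 0;
 *	}
 */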
2755
2756 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2757 {
2758 struct snd_pcm_runtime *runtime = substream->runtime;
2759 int arg;
2760
2761 if (get_user(arg, _arg))
2762 return -EFAULT;
2763 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2764 return -EINVAL;
2765 runtime->tstamp_type = arg;
2766 return 0;
2767 }
2768
2769 static int snd_pcm_common_ioctl(struct file *file,
2770 struct snd_pcm_substream *substream,
2771 unsigned int cmd, void __user *arg)
2772 {
2773 switch (cmd) {
2774 case SNDRV_PCM_IOCTL_PVERSION:
2775 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2776 case SNDRV_PCM_IOCTL_INFO:
2777 return snd_pcm_info_user(substream, arg);
2778 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2779 return 0;
2780 case SNDRV_PCM_IOCTL_TTSTAMP:
2781 return snd_pcm_tstamp(substream, arg);
2782 case SNDRV_PCM_IOCTL_HW_REFINE:
2783 return snd_pcm_hw_refine_user(substream, arg);
2784 case SNDRV_PCM_IOCTL_HW_PARAMS:
2785 return snd_pcm_hw_params_user(substream, arg);
2786 case SNDRV_PCM_IOCTL_HW_FREE:
2787 return snd_pcm_hw_free(substream);
2788 case SNDRV_PCM_IOCTL_SW_PARAMS:
2789 return snd_pcm_sw_params_user(substream, arg);
2790 case SNDRV_PCM_IOCTL_STATUS:
2791 return snd_pcm_status_user(substream, arg, false);
2792 case SNDRV_PCM_IOCTL_STATUS_EXT:
2793 return snd_pcm_status_user(substream, arg, true);
2794 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2795 return snd_pcm_channel_info_user(substream, arg);
2796 case SNDRV_PCM_IOCTL_PREPARE:
2797 return snd_pcm_prepare(substream, file);
2798 case SNDRV_PCM_IOCTL_RESET:
2799 return snd_pcm_reset(substream);
2800 case SNDRV_PCM_IOCTL_START:
2801 return snd_pcm_start_lock_irq(substream);
2802 case SNDRV_PCM_IOCTL_LINK:
2803 return snd_pcm_link(substream, (int)(unsigned long) arg);
2804 case SNDRV_PCM_IOCTL_UNLINK:
2805 return snd_pcm_unlink(substream);
2806 case SNDRV_PCM_IOCTL_RESUME:
2807 return snd_pcm_resume(substream);
2808 case SNDRV_PCM_IOCTL_XRUN:
2809 return snd_pcm_xrun(substream);
2810 case SNDRV_PCM_IOCTL_HWSYNC:
2811 return snd_pcm_hwsync(substream);
2812 case SNDRV_PCM_IOCTL_DELAY:
2813 {
2814 snd_pcm_sframes_t delay = snd_pcm_delay(substream);
2815 snd_pcm_sframes_t __user *res = arg;
2816
2817 if (delay < 0)
2818 return delay;
2819 if (put_user(delay, res))
2820 return -EFAULT;
2821 return 0;
2822 }
2823 case SNDRV_PCM_IOCTL_SYNC_PTR:
2824 return snd_pcm_sync_ptr(substream, arg);
2825 #ifdef CONFIG_SND_SUPPORT_OLD_API
2826 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2827 return snd_pcm_hw_refine_old_user(substream, arg);
2828 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2829 return snd_pcm_hw_params_old_user(substream, arg);
2830 #endif
2831 case SNDRV_PCM_IOCTL_DRAIN:
2832 return snd_pcm_drain(substream, file);
2833 case SNDRV_PCM_IOCTL_DROP:
2834 return snd_pcm_drop(substream);
2835 case SNDRV_PCM_IOCTL_PAUSE:
2836 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2837 substream,
2838 (int)(unsigned long)arg);
2839 }
2840 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2841 return -ENOTTY;
2842 }
2843
2844 static int snd_pcm_common_ioctl1(struct file *file,
2845 struct snd_pcm_substream *substream,
2846 unsigned int cmd, void __user *arg)
2847 {
2848 struct snd_card *card = substream->pcm->card;
2849 int res;
2850
2851 snd_power_lock(card);
2852 res = snd_power_wait(card, SNDRV_CTL_POWER_D0);
2853 if (res >= 0)
2854 res = snd_pcm_common_ioctl(file, substream, cmd, arg);
2855 snd_power_unlock(card);
2856 return res;
2857 }
2858
2859 static int snd_pcm_playback_ioctl1(struct file *file,
2860 struct snd_pcm_substream *substream,
2861 unsigned int cmd, void __user *arg)
2862 {
2863 if (PCM_RUNTIME_CHECK(substream))
2864 return -ENXIO;
2865 if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
2866 return -EINVAL;
2867 switch (cmd) {
2868 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2869 {
2870 struct snd_xferi xferi;
2871 struct snd_xferi __user *_xferi = arg;
2872 struct snd_pcm_runtime *runtime = substream->runtime;
2873 snd_pcm_sframes_t result;
2874 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2875 return -EBADFD;
2876 if (put_user(0, &_xferi->result))
2877 return -EFAULT;
2878 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2879 return -EFAULT;
2880 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2881 __put_user(result, &_xferi->result);
2882 return result < 0 ? result : 0;
2883 }
2884 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2885 {
2886 struct snd_xfern xfern;
2887 struct snd_xfern __user *_xfern = arg;
2888 struct snd_pcm_runtime *runtime = substream->runtime;
2889 void __user **bufs;
2890 snd_pcm_sframes_t result;
2891 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2892 return -EBADFD;
2893 if (runtime->channels > 128)
2894 return -EINVAL;
2895 if (put_user(0, &_xfern->result))
2896 return -EFAULT;
2897 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2898 return -EFAULT;
2899
2900 bufs = memdup_user(xfern.bufs,
2901 sizeof(void *) * runtime->channels);
2902 if (IS_ERR(bufs))
2903 return PTR_ERR(bufs);
2904 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2905 kfree(bufs);
2906 __put_user(result, &_xfern->result);
2907 return result < 0 ? result : 0;
2908 }
2909 case SNDRV_PCM_IOCTL_REWIND:
2910 {
2911 snd_pcm_uframes_t frames;
2912 snd_pcm_uframes_t __user *_frames = arg;
2913 snd_pcm_sframes_t result;
2914 if (get_user(frames, _frames))
2915 return -EFAULT;
2916 if (put_user(0, _frames))
2917 return -EFAULT;
2918 result = snd_pcm_playback_rewind(substream, frames);
2919 __put_user(result, _frames);
2920 return result < 0 ? result : 0;
2921 }
2922 case SNDRV_PCM_IOCTL_FORWARD:
2923 {
2924 snd_pcm_uframes_t frames;
2925 snd_pcm_uframes_t __user *_frames = arg;
2926 snd_pcm_sframes_t result;
2927 if (get_user(frames, _frames))
2928 return -EFAULT;
2929 if (put_user(0, _frames))
2930 return -EFAULT;
2931 result = snd_pcm_playback_forward(substream, frames);
2932 __put_user(result, _frames);
2933 return result < 0 ? result : 0;
2934 }
2935 }
2936 return snd_pcm_common_ioctl1(file, substream, cmd, arg);
2937 }
2938
2939 static int snd_pcm_capture_ioctl1(struct file *file,
2940 struct snd_pcm_substream *substream,
2941 unsigned int cmd, void __user *arg)
2942 {
2943 if (PCM_RUNTIME_CHECK(substream))
2944 return -ENXIO;
2945 if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
2946 return -EINVAL;
2947 switch (cmd) {
2948 case SNDRV_PCM_IOCTL_READI_FRAMES:
2949 {
2950 struct snd_xferi xferi;
2951 struct snd_xferi __user *_xferi = arg;
2952 struct snd_pcm_runtime *runtime = substream->runtime;
2953 snd_pcm_sframes_t result;
2954 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2955 return -EBADFD;
2956 if (put_user(0, &_xferi->result))
2957 return -EFAULT;
2958 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2959 return -EFAULT;
2960 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2961 __put_user(result, &_xferi->result);
2962 return result < 0 ? result : 0;
2963 }
2964 case SNDRV_PCM_IOCTL_READN_FRAMES:
2965 {
2966 struct snd_xfern xfern;
2967 struct snd_xfern __user *_xfern = arg;
2968 struct snd_pcm_runtime *runtime = substream->runtime;
2969 void *bufs;
2970 snd_pcm_sframes_t result;
2971 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2972 return -EBADFD;
2973 if (runtime->channels > 128)
2974 return -EINVAL;
2975 if (put_user(0, &_xfern->result))
2976 return -EFAULT;
2977 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2978 return -EFAULT;
2979
2980 bufs = memdup_user(xfern.bufs,
2981 sizeof(void *) * runtime->channels);
2982 if (IS_ERR(bufs))
2983 return PTR_ERR(bufs);
2984 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2985 kfree(bufs);
2986 __put_user(result, &_xfern->result);
2987 return result < 0 ? result : 0;
2988 }
2989 case SNDRV_PCM_IOCTL_REWIND:
2990 {
2991 snd_pcm_uframes_t frames;
2992 snd_pcm_uframes_t __user *_frames = arg;
2993 snd_pcm_sframes_t result;
2994 if (get_user(frames, _frames))
2995 return -EFAULT;
2996 if (put_user(0, _frames))
2997 return -EFAULT;
2998 result = snd_pcm_capture_rewind(substream, frames);
2999 __put_user(result, _frames);
3000 return result < 0 ? result : 0;
3001 }
3002 case SNDRV_PCM_IOCTL_FORWARD:
3003 {
3004 snd_pcm_uframes_t frames;
3005 snd_pcm_uframes_t __user *_frames = arg;
3006 snd_pcm_sframes_t result;
3007 if (get_user(frames, _frames))
3008 return -EFAULT;
3009 if (put_user(0, _frames))
3010 return -EFAULT;
3011 result = snd_pcm_capture_forward(substream, frames);
3012 __put_user(result, _frames);
3013 return result < 0 ? result : 0;
3014 }
3015 }
3016 return snd_pcm_common_ioctl1(file, substream, cmd, arg);
3017 }
3018
3019 static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd,
3020 unsigned long arg)
3021 {
3022 struct snd_pcm_file *pcm_file;
3023
3024 pcm_file = file->private_data;
3025
3026 if (((cmd >> 8) & 0xff) != 'A')
3027 return -ENOTTY;
3028
3029 return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
3030 (void __user *)arg);
3031 }
3032
3033 static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
3034 unsigned long arg)
3035 {
3036 struct snd_pcm_file *pcm_file;
3037
3038 pcm_file = file->private_data;
3039
3040 if (((cmd >> 8) & 0xff) != 'A')
3041 return -ENOTTY;
3042
3043 return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
3044 (void __user *)arg);
3045 }
3046
3047 /**
3048 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3049 * @substream: PCM substream
3050 * @cmd: IOCTL cmd
3051 * @arg: IOCTL argument
3052 *
3053 * The function is provided primarily for the OSS layer and USB gadget drivers,
3054 * and it allows only a limited set of ioctls (hw_params, sw_params,
3055 * prepare, start, drain, drop, forward).
3056 */
3057 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3058 unsigned int cmd, void *arg)
3059 {
3060 snd_pcm_uframes_t *frames = arg;
3061 snd_pcm_sframes_t result;
3062
3063 switch (cmd) {
3064 case SNDRV_PCM_IOCTL_FORWARD:
3065 {
3066 /* provided only for OSS; capture-only and no value returned */
3067 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3068 return -EINVAL;
3069 result = snd_pcm_capture_forward(substream, *frames);
3070 return result < 0 ? result : 0;
3071 }
3072 case SNDRV_PCM_IOCTL_HW_PARAMS:
3073 return snd_pcm_hw_params(substream, arg);
3074 case SNDRV_PCM_IOCTL_SW_PARAMS:
3075 return snd_pcm_sw_params(substream, arg);
3076 case SNDRV_PCM_IOCTL_PREPARE:
3077 return snd_pcm_prepare(substream, NULL);
3078 case SNDRV_PCM_IOCTL_START:
3079 return snd_pcm_start_lock_irq(substream);
3080 case SNDRV_PCM_IOCTL_DRAIN:
3081 return snd_pcm_drain(substream, NULL);
3082 case SNDRV_PCM_IOCTL_DROP:
3083 return snd_pcm_drop(substream);
3084 case SNDRV_PCM_IOCTL_DELAY:
3085 {
3086 result = snd_pcm_delay(substream);
3087 if (result < 0)
3088 return result;
3089 *frames = result;
3090 return 0;
3091 }
3092 default:
3093 return -EINVAL;
3094 }
3095 }
3096 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
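/*
 * A hedged sketch of an in-kernel caller (e.g. the OSS emulation or a USB
 * gadget driver) driving a substream through this helper; hw/sw params
 * setup is omitted and the function name is made up:
 *
 *	static int foo_start_and_drain(struct snd_pcm_substream *substream)
 *	{
 *		int err;
 *
 *		err = snd_pcm_kernel_ioctl(substream,
 *					   SNDRV_PCM_IOCTL_PREPARE, NULL);
 *		if (err < 0)
 *			return err;
 *		err = snd_pcm_kernel_ioctl(substream,
 *					   SNDRV_PCM_IOCTL_START, NULL);
 *		if (err < 0)
 *			return err;
 *		// ... queue data, e.g. via snd_pcm_lib_write() ...
 *		return snd_pcm_kernel_ioctl(substream,
 *					    SNDRV_PCM_IOCTL_DRAIN, NULL);
 *	}
 */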
3097
3098 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3099 loff_t * offset)
3100 {
3101 struct snd_pcm_file *pcm_file;
3102 struct snd_pcm_substream *substream;
3103 struct snd_pcm_runtime *runtime;
3104 snd_pcm_sframes_t result;
3105
3106 pcm_file = file->private_data;
3107 substream = pcm_file->substream;
3108 if (PCM_RUNTIME_CHECK(substream))
3109 return -ENXIO;
3110 runtime = substream->runtime;
3111 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3112 return -EBADFD;
3113 if (!frame_aligned(runtime, count))
3114 return -EINVAL;
3115 count = bytes_to_frames(runtime, count);
3116 result = snd_pcm_lib_read(substream, buf, count);
3117 if (result > 0)
3118 result = frames_to_bytes(runtime, result);
3119 return result;
3120 }
3121
3122 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3123 size_t count, loff_t * offset)
3124 {
3125 struct snd_pcm_file *pcm_file;
3126 struct snd_pcm_substream *substream;
3127 struct snd_pcm_runtime *runtime;
3128 snd_pcm_sframes_t result;
3129
3130 pcm_file = file->private_data;
3131 substream = pcm_file->substream;
3132 if (PCM_RUNTIME_CHECK(substream))
3133 return -ENXIO;
3134 runtime = substream->runtime;
3135 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3136 return -EBADFD;
3137 if (!frame_aligned(runtime, count))
3138 return -EINVAL;
3139 count = bytes_to_frames(runtime, count);
3140 result = snd_pcm_lib_write(substream, buf, count);
3141 if (result > 0)
3142 result = frames_to_bytes(runtime, result);
3143 return result;
3144 }
3145
3146 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3147 {
3148 struct snd_pcm_file *pcm_file;
3149 struct snd_pcm_substream *substream;
3150 struct snd_pcm_runtime *runtime;
3151 snd_pcm_sframes_t result;
3152 unsigned long i;
3153 void __user **bufs;
3154 snd_pcm_uframes_t frames;
3155
3156 pcm_file = iocb->ki_filp->private_data;
3157 substream = pcm_file->substream;
3158 if (PCM_RUNTIME_CHECK(substream))
3159 return -ENXIO;
3160 runtime = substream->runtime;
3161 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3162 return -EBADFD;
3163 if (!iter_is_iovec(to))
3164 return -EINVAL;
3165 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3166 return -EINVAL;
3167 if (!frame_aligned(runtime, to->iov->iov_len))
3168 return -EINVAL;
3169 frames = bytes_to_samples(runtime, to->iov->iov_len);
3170 bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
3171 if (bufs == NULL)
3172 return -ENOMEM;
3173 for (i = 0; i < to->nr_segs; ++i)
3174 bufs[i] = to->iov[i].iov_base;
3175 result = snd_pcm_lib_readv(substream, bufs, frames);
3176 if (result > 0)
3177 result = frames_to_bytes(runtime, result);
3178 kfree(bufs);
3179 return result;
3180 }
3181
3182 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3183 {
3184 struct snd_pcm_file *pcm_file;
3185 struct snd_pcm_substream *substream;
3186 struct snd_pcm_runtime *runtime;
3187 snd_pcm_sframes_t result;
3188 unsigned long i;
3189 void __user **bufs;
3190 snd_pcm_uframes_t frames;
3191
3192 pcm_file = iocb->ki_filp->private_data;
3193 substream = pcm_file->substream;
3194 if (PCM_RUNTIME_CHECK(substream))
3195 return -ENXIO;
3196 runtime = substream->runtime;
3197 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3198 return -EBADFD;
3199 if (!iter_is_iovec(from))
3200 return -EINVAL;
3201 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3202 !frame_aligned(runtime, from->iov->iov_len))
3203 return -EINVAL;
3204 frames = bytes_to_samples(runtime, from->iov->iov_len);
3205 bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
3206 if (bufs == NULL)
3207 return -ENOMEM;
3208 for (i = 0; i < from->nr_segs; ++i)
3209 bufs[i] = from->iov[i].iov_base;
3210 result = snd_pcm_lib_writev(substream, bufs, frames);
3211 if (result > 0)
3212 result = frames_to_bytes(runtime, result);
3213 kfree(bufs);
3214 return result;
3215 }
3216
3217 static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
3218 {
3219 struct snd_pcm_file *pcm_file;
3220 struct snd_pcm_substream *substream;
3221 struct snd_pcm_runtime *runtime;
3222 unsigned int mask;
3223 snd_pcm_uframes_t avail;
3224
3225 pcm_file = file->private_data;
3226
3227 substream = pcm_file->substream;
3228 if (PCM_RUNTIME_CHECK(substream))
3229 return POLLOUT | POLLWRNORM | POLLERR;
3230 runtime = substream->runtime;
3231
3232 poll_wait(file, &runtime->sleep, wait);
3233
3234 snd_pcm_stream_lock_irq(substream);
3235 avail = snd_pcm_playback_avail(runtime);
3236 switch (runtime->status->state) {
3237 case SNDRV_PCM_STATE_RUNNING:
3238 case SNDRV_PCM_STATE_PREPARED:
3239 case SNDRV_PCM_STATE_PAUSED:
3240 if (avail >= runtime->control->avail_min) {
3241 mask = POLLOUT | POLLWRNORM;
3242 break;
3243 }
3244 /* Fall through */
3245 case SNDRV_PCM_STATE_DRAINING:
3246 mask = 0;
3247 break;
3248 default:
3249 mask = POLLOUT | POLLWRNORM | POLLERR;
3250 break;
3251 }
3252 snd_pcm_stream_unlock_irq(substream);
3253 return mask;
3254 }
3255
3256 static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
3257 {
3258 struct snd_pcm_file *pcm_file;
3259 struct snd_pcm_substream *substream;
3260 struct snd_pcm_runtime *runtime;
3261 unsigned int mask;
3262 snd_pcm_uframes_t avail;
3263
3264 pcm_file = file->private_data;
3265
3266 substream = pcm_file->substream;
3267 if (PCM_RUNTIME_CHECK(substream))
3268 return POLLIN | POLLRDNORM | POLLERR;
3269 runtime = substream->runtime;
3270
3271 poll_wait(file, &runtime->sleep, wait);
3272
3273 snd_pcm_stream_lock_irq(substream);
3274 avail = snd_pcm_capture_avail(runtime);
3275 switch (runtime->status->state) {
3276 case SNDRV_PCM_STATE_RUNNING:
3277 case SNDRV_PCM_STATE_PREPARED:
3278 case SNDRV_PCM_STATE_PAUSED:
3279 if (avail >= runtime->control->avail_min) {
3280 mask = POLLIN | POLLRDNORM;
3281 break;
3282 }
3283 mask = 0;
3284 break;
3285 case SNDRV_PCM_STATE_DRAINING:
3286 if (avail > 0) {
3287 mask = POLLIN | POLLRDNORM;
3288 break;
3289 }
3290 /* Fall through */
3291 default:
3292 mask = POLLIN | POLLRDNORM | POLLERR;
3293 break;
3294 }
3295 snd_pcm_stream_unlock_irq(substream);
3296 return mask;
3297 }
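/*
 * A minimal user-space sketch matching the playback poll semantics above:
 * wait until at least avail_min frames can be written to an already-opened,
 * running playback fd (error handling trimmed):
 *
 *	#include <poll.h>
 *
 *	static int wait_for_space(int fd)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *		// POLLERR signals an XRUN or another abnormal state
 *		return (pfd.revents & POLLERR) ? -1 : 0;
 *	}
 */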
3298
3299 /*
3300 * mmap support
3301 */
3302
3303 /*
3304 * Only on coherent architectures can we mmap the status and control records
3305 * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3306 */
3307 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3308 /*
3309 * mmap status record
3310 */
3311 static int snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3312 {
3313 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3314 struct snd_pcm_runtime *runtime;
3315
3316 if (substream == NULL)
3317 return VM_FAULT_SIGBUS;
3318 runtime = substream->runtime;
3319 vmf->page = virt_to_page(runtime->status);
3320 get_page(vmf->page);
3321 return 0;
3322 }
3323
3324 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3325 {
3326 .fault = snd_pcm_mmap_status_fault,
3327 };
3328
3329 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3330 struct vm_area_struct *area)
3331 {
3332 long size;
3333 if (!(area->vm_flags & VM_READ))
3334 return -EINVAL;
3335 size = area->vm_end - area->vm_start;
3336 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3337 return -EINVAL;
3338 area->vm_ops = &snd_pcm_vm_ops_status;
3339 area->vm_private_data = substream;
3340 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3341 return 0;
3342 }
3343
3344 /*
3345 * mmap control record
3346 */
3347 static int snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3348 {
3349 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3350 struct snd_pcm_runtime *runtime;
3351
3352 if (substream == NULL)
3353 return VM_FAULT_SIGBUS;
3354 runtime = substream->runtime;
3355 vmf->page = virt_to_page(runtime->control);
3356 get_page(vmf->page);
3357 return 0;
3358 }
3359
3360 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3361 {
3362 .fault = snd_pcm_mmap_control_fault,
3363 };
3364
3365 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3366 struct vm_area_struct *area)
3367 {
3368 long size;
3369 if (!(area->vm_flags & VM_READ))
3370 return -EINVAL;
3371 size = area->vm_end - area->vm_start;
3372 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3373 return -EINVAL;
3374 area->vm_ops = &snd_pcm_vm_ops_control;
3375 area->vm_private_data = substream;
3376 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3377 return 0;
3378 }
3379
3380 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3381 {
3382 if (pcm_file->no_compat_mmap)
3383 return false;
3384 /* Disallow the status/control mmap when the SYNC_APPLPTR flag is set;
3385 * it forces user-space to fall back to snd_pcm_sync_ptr(),
3386 * thus it effectively ensures the manual update of appl_ptr.
3387 * In theory, it would be enough to disallow only the PCM control mmap,
3388 * but since the current alsa-lib implementation always requires the status
3389 * and control mmaps to be paired, we have to disable both of them.
3390 */
3391 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3392 return false;
3393 return true;
3394 }
3395
3396 #else /* ! coherent mmap */
3397 /*
3398 * mmap is not supported for the status and control records here.
3399 */
3400 #define pcm_status_mmap_allowed(pcm_file) false
3401
3402 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3403 struct vm_area_struct *area)
3404 {
3405 return -ENXIO;
3406 }
3407 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3408 struct vm_area_struct *area)
3409 {
3410 return -ENXIO;
3411 }
3412 #endif /* coherent mmap */
3413
3414 static inline struct page *
3415 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3416 {
3417 void *vaddr = substream->runtime->dma_area + ofs;
3418 return virt_to_page(vaddr);
3419 }
3420
3421 /*
3422 * fault callback for mmapping a RAM page
3423 */
3424 static int snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3425 {
3426 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3427 struct snd_pcm_runtime *runtime;
3428 unsigned long offset;
3429 struct page * page;
3430 size_t dma_bytes;
3431
3432 if (substream == NULL)
3433 return VM_FAULT_SIGBUS;
3434 runtime = substream->runtime;
3435 offset = vmf->pgoff << PAGE_SHIFT;
3436 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3437 if (offset > dma_bytes - PAGE_SIZE)
3438 return VM_FAULT_SIGBUS;
3439 if (substream->ops->page)
3440 page = substream->ops->page(substream, offset);
3441 else
3442 page = snd_pcm_default_page_ops(substream, offset);
3443 if (!page)
3444 return VM_FAULT_SIGBUS;
3445 get_page(page);
3446 vmf->page = page;
3447 return 0;
3448 }
3449
3450 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3451 .open = snd_pcm_mmap_data_open,
3452 .close = snd_pcm_mmap_data_close,
3453 };
3454
3455 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3456 .open = snd_pcm_mmap_data_open,
3457 .close = snd_pcm_mmap_data_close,
3458 .fault = snd_pcm_mmap_data_fault,
3459 };
3460
3461 /*
3462 * mmap the DMA buffer on RAM
3463 */
3464
3465 /**
3466 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3467 * @substream: PCM substream
3468 * @area: VMA
3469 *
3470 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3471 * this function is invoked implicitly.
3472 */
3473 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3474 struct vm_area_struct *area)
3475 {
3476 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3477 #ifdef CONFIG_GENERIC_ALLOCATOR
3478 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3479 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3480 return remap_pfn_range(area, area->vm_start,
3481 substream->dma_buffer.addr >> PAGE_SHIFT,
3482 area->vm_end - area->vm_start, area->vm_page_prot);
3483 }
3484 #endif /* CONFIG_GENERIC_ALLOCATOR */
3485 #ifndef CONFIG_X86 /* to avoid warnings from arch/x86/mm/pat.c */
3486 if (!substream->ops->page &&
3487 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3488 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3489 area,
3490 substream->runtime->dma_area,
3491 substream->runtime->dma_addr,
3492 area->vm_end - area->vm_start);
3493 #endif /* CONFIG_X86 */
3494 /* mmap with fault handler */
3495 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3496 return 0;
3497 }
3498 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3499
3500 /*
3501 * mmap the DMA buffer on I/O memory area
3502 */
3503 #if SNDRV_PCM_INFO_MMAP_IOMEM
3504 /**
3505 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3506 * @substream: PCM substream
3507 * @area: VMA
3508 *
3509 * When your hardware uses iomapped pages as the hardware buffer and you
3510 * want to mmap them, pass this function as the mmap pcm_ops. Note that this
3511 * is supposed to work only on limited architectures.
3512 */
3513 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3514 struct vm_area_struct *area)
3515 {
3516 struct snd_pcm_runtime *runtime = substream->runtime;
3517
3518 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3519 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3520 }
3521 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3522 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
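/*
 * A hedged sketch of how a driver selects one of the mmap helpers above
 * through its snd_pcm_ops; all callbacks other than .mmap are made-up
 * driver functions:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.close		= foo_pcm_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= foo_pcm_hw_params,
 *		.hw_free	= foo_pcm_hw_free,
 *		.prepare	= foo_pcm_prepare,
 *		.trigger	= foo_pcm_trigger,
 *		.pointer	= foo_pcm_pointer,
 *		.mmap		= snd_pcm_lib_mmap_iomem, // iomapped buffer
 *		// leave .mmap NULL to fall back to snd_pcm_lib_default_mmap()
 *	};
 */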
3523
3524 /*
3525 * mmap DMA buffer
3526 */
3527 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3528 struct vm_area_struct *area)
3529 {
3530 struct snd_pcm_runtime *runtime;
3531 long size;
3532 unsigned long offset;
3533 size_t dma_bytes;
3534 int err;
3535
3536 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3537 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3538 return -EINVAL;
3539 } else {
3540 if (!(area->vm_flags & VM_READ))
3541 return -EINVAL;
3542 }
3543 runtime = substream->runtime;
3544 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3545 return -EBADFD;
3546 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3547 return -ENXIO;
3548 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3549 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3550 return -EINVAL;
3551 size = area->vm_end - area->vm_start;
3552 offset = area->vm_pgoff << PAGE_SHIFT;
3553 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3554 if ((size_t)size > dma_bytes)
3555 return -EINVAL;
3556 if (offset > dma_bytes - size)
3557 return -EINVAL;
3558
3559 area->vm_ops = &snd_pcm_vm_ops_data;
3560 area->vm_private_data = substream;
3561 if (substream->ops->mmap)
3562 err = substream->ops->mmap(substream, area);
3563 else
3564 err = snd_pcm_lib_default_mmap(substream, area);
3565 if (!err)
3566 atomic_inc(&substream->mmap_count);
3567 return err;
3568 }
3569 EXPORT_SYMBOL(snd_pcm_mmap_data);
3570
3571 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3572 {
3573 struct snd_pcm_file * pcm_file;
3574 struct snd_pcm_substream *substream;
3575 unsigned long offset;
3576
3577 pcm_file = file->private_data;
3578 substream = pcm_file->substream;
3579 if (PCM_RUNTIME_CHECK(substream))
3580 return -ENXIO;
3581
3582 offset = area->vm_pgoff << PAGE_SHIFT;
3583 switch (offset) {
3584 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3585 if (!pcm_status_mmap_allowed(pcm_file))
3586 return -ENXIO;
3587 return snd_pcm_mmap_status(substream, file, area);
3588 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3589 if (!pcm_status_mmap_allowed(pcm_file))
3590 return -ENXIO;
3591 return snd_pcm_mmap_control(substream, file, area);
3592 default:
3593 return snd_pcm_mmap_data(substream, file, area);
3594 }
3595 return 0;
3596 }
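/*
 * A minimal user-space sketch of the three mmap offsets dispatched above,
 * assuming a coherent architecture where the status/control mmap is
 * allowed; fd is an already-configured PCM fd and buffer_bytes is the
 * negotiated buffer size from hw_params:
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <sound/asound.h>
 *
 *	static void *map_pcm(int fd, size_t buffer_bytes,
 *			     struct snd_pcm_mmap_status **status,
 *			     struct snd_pcm_mmap_control **control)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *
 *		*status = mmap(NULL, page, PROT_READ, MAP_SHARED,
 *			       fd, SNDRV_PCM_MMAP_OFFSET_STATUS);
 *		*control = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);
 *		return mmap(NULL, buffer_bytes, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, SNDRV_PCM_MMAP_OFFSET_DATA);
 *	}
 */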
3597
3598 static int snd_pcm_fasync(int fd, struct file * file, int on)
3599 {
3600 struct snd_pcm_file * pcm_file;
3601 struct snd_pcm_substream *substream;
3602 struct snd_pcm_runtime *runtime;
3603
3604 pcm_file = file->private_data;
3605 substream = pcm_file->substream;
3606 if (PCM_RUNTIME_CHECK(substream))
3607 return -ENXIO;
3608 runtime = substream->runtime;
3609 return fasync_helper(fd, file, on, &runtime->fasync);
3610 }
3611
3612 /*
3613 * ioctl32 compat
3614 */
3615 #ifdef CONFIG_COMPAT
3616 #include "pcm_compat.c"
3617 #else
3618 #define snd_pcm_ioctl_compat NULL
3619 #endif
3620
3621 /*
3622  * Obsolete helpers, kept only for binary compatibility with the old
3622  * user-space API; to be removed eventually.
3623 */
3624
3625 #ifdef CONFIG_SND_SUPPORT_OLD_API
3626 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3627 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
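/*
 * Illustrative note, not part of the original file: the two macros above
 * remap the rmask/cmask bitmaps between the old and the current parameter
 * numbering.  Bits 0-2 (the mask parameters) stay in place, while the
 * remaining bits are shifted by five positions, presumably matching the
 * five reserved parameter indices that the current layout inserts between
 * the masks and the intervals.  Worked example:
 *
 *	__OLD_TO_NEW_MASK(0x0000000f) == 0x00000107
 *	__NEW_TO_OLD_MASK(0x00000107) == 0x0000000f
 */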
3628
3629 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3630 struct snd_pcm_hw_params_old *oparams)
3631 {
3632 unsigned int i;
3633
3634 memset(params, 0, sizeof(*params));
3635 params->flags = oparams->flags;
3636 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3637 params->masks[i].bits[0] = oparams->masks[i];
3638 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3639 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3640 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3641 params->info = oparams->info;
3642 params->msbits = oparams->msbits;
3643 params->rate_num = oparams->rate_num;
3644 params->rate_den = oparams->rate_den;
3645 params->fifo_size = oparams->fifo_size;
3646 }
3647
3648 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3649 struct snd_pcm_hw_params *params)
3650 {
3651 unsigned int i;
3652
3653 memset(oparams, 0, sizeof(*oparams));
3654 oparams->flags = params->flags;
3655 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3656 oparams->masks[i] = params->masks[i].bits[0];
3657 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3658 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3659 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3660 oparams->info = params->info;
3661 oparams->msbits = params->msbits;
3662 oparams->rate_num = params->rate_num;
3663 oparams->rate_den = params->rate_den;
3664 oparams->fifo_size = params->fifo_size;
3665 }
3666
3667 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3668 struct snd_pcm_hw_params_old __user * _oparams)
3669 {
3670 struct snd_pcm_hw_params *params;
3671 struct snd_pcm_hw_params_old *oparams = NULL;
3672 int err;
3673
3674 params = kmalloc(sizeof(*params), GFP_KERNEL);
3675 if (!params)
3676 return -ENOMEM;
3677
3678 oparams = memdup_user(_oparams, sizeof(*oparams));
3679 if (IS_ERR(oparams)) {
3680 err = PTR_ERR(oparams);
3681 goto out;
3682 }
3683 snd_pcm_hw_convert_from_old_params(params, oparams);
3684 err = snd_pcm_hw_refine(substream, params);
3685 if (err < 0)
3686 goto out_old;
3687
3688 err = fixup_unreferenced_params(substream, params);
3689 if (err < 0)
3690 goto out_old;
3691
3692 snd_pcm_hw_convert_to_old_params(oparams, params);
3693 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3694 err = -EFAULT;
3695 out_old:
3696 kfree(oparams);
3697 out:
3698 kfree(params);
3699 return err;
3700 }
3701
3702 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3703 struct snd_pcm_hw_params_old __user * _oparams)
3704 {
3705 struct snd_pcm_hw_params *params;
3706 struct snd_pcm_hw_params_old *oparams = NULL;
3707 int err;
3708
3709 params = kmalloc(sizeof(*params), GFP_KERNEL);
3710 if (!params)
3711 return -ENOMEM;
3712
3713 oparams = memdup_user(_oparams, sizeof(*oparams));
3714 if (IS_ERR(oparams)) {
3715 err = PTR_ERR(oparams);
3716 goto out;
3717 }
3718
3719 snd_pcm_hw_convert_from_old_params(params, oparams);
3720 err = snd_pcm_hw_params(substream, params);
3721 if (err < 0)
3722 goto out_old;
3723
3724 snd_pcm_hw_convert_to_old_params(oparams, params);
3725 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3726 err = -EFAULT;
3727 out_old:
3728 kfree(oparams);
3729 out:
3730 kfree(params);
3731 return err;
3732 }
3733 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3734
3735 #ifndef CONFIG_MMU
3736 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3737 unsigned long addr,
3738 unsigned long len,
3739 unsigned long pgoff,
3740 unsigned long flags)
3741 {
3742 struct snd_pcm_file *pcm_file = file->private_data;
3743 struct snd_pcm_substream *substream = pcm_file->substream;
3744 struct snd_pcm_runtime *runtime = substream->runtime;
3745 unsigned long offset = pgoff << PAGE_SHIFT;
3746
3747 switch (offset) {
3748 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3749 return (unsigned long)runtime->status;
3750 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3751 return (unsigned long)runtime->control;
3752 default:
3753 return (unsigned long)runtime->dma_area + offset;
3754 }
3755 }
3756 #else
3757 # define snd_pcm_get_unmapped_area NULL
3758 #endif
3759
3760 /*
3761 * Register section
3762 */
3763
3764 const struct file_operations snd_pcm_f_ops[2] = {
3765 {
3766 .owner = THIS_MODULE,
3767 .write = snd_pcm_write,
3768 .write_iter = snd_pcm_writev,
3769 .open = snd_pcm_playback_open,
3770 .release = snd_pcm_release,
3771 .llseek = no_llseek,
3772 .poll = snd_pcm_playback_poll,
3773 .unlocked_ioctl = snd_pcm_playback_ioctl,
3774 .compat_ioctl = snd_pcm_ioctl_compat,
3775 .mmap = snd_pcm_mmap,
3776 .fasync = snd_pcm_fasync,
3777 .get_unmapped_area = snd_pcm_get_unmapped_area,
3778 },
3779 {
3780 .owner = THIS_MODULE,
3781 .read = snd_pcm_read,
3782 .read_iter = snd_pcm_readv,
3783 .open = snd_pcm_capture_open,
3784 .release = snd_pcm_release,
3785 .llseek = no_llseek,
3786 .poll = snd_pcm_capture_poll,
3787 .unlocked_ioctl = snd_pcm_capture_ioctl,
3788 .compat_ioctl = snd_pcm_ioctl_compat,
3789 .mmap = snd_pcm_mmap,
3790 .fasync = snd_pcm_fasync,
3791 .get_unmapped_area = snd_pcm_get_unmapped_area,
3792 }
3793 };
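/*
 * Illustrative note, not part of the original file: index 0 of the array
 * above serves playback and index 1 serves capture, matching
 * SNDRV_PCM_STREAM_PLAYBACK (0) and SNDRV_PCM_STREAM_CAPTURE (1), so the
 * PCM device registration code elsewhere in the core can pick the entry by
 * stream direction, roughly:
 *
 *	const struct file_operations *f_ops =
 *		&snd_pcm_f_ops[SNDRV_PCM_STREAM_CAPTURE];
 */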