sound/core/pcm_native.c
1 /*
2 * Digital Audio (PCM) abstract layer
3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/file.h>
25 #include <linux/slab.h>
26 #include <linux/sched/signal.h>
27 #include <linux/time.h>
28 #include <linux/pm_qos.h>
29 #include <linux/io.h>
30 #include <linux/dma-mapping.h>
31 #include <sound/core.h>
32 #include <sound/control.h>
33 #include <sound/info.h>
34 #include <sound/pcm.h>
35 #include <sound/pcm_params.h>
36 #include <sound/timer.h>
37 #include <sound/minors.h>
38 #include <linux/uio.h>
39
40 #include "pcm_local.h"
41
42 #ifdef CONFIG_SND_DEBUG
43 #define CREATE_TRACE_POINTS
44 #include "pcm_param_trace.h"
45 #else
46 #define trace_hw_mask_param_enabled() 0
47 #define trace_hw_interval_param_enabled() 0
48 #define trace_hw_mask_param(substream, type, index, prev, curr)
49 #define trace_hw_interval_param(substream, type, index, prev, curr)
50 #endif
51
52 /*
53 * Compatibility
54 */
55
56 struct snd_pcm_hw_params_old {
57 unsigned int flags;
58 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
59 SNDRV_PCM_HW_PARAM_ACCESS + 1];
60 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
61 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
62 unsigned int rmask;
63 unsigned int cmask;
64 unsigned int info;
65 unsigned int msbits;
66 unsigned int rate_num;
67 unsigned int rate_den;
68 snd_pcm_uframes_t fifo_size;
69 unsigned char reserved[64];
70 };
71
72 #ifdef CONFIG_SND_SUPPORT_OLD_API
73 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
74 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
75
76 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
77 struct snd_pcm_hw_params_old __user * _oparams);
78 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
79 struct snd_pcm_hw_params_old __user * _oparams);
80 #endif
81 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
82
83 /*
84 *
85 */
86
87 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
88 static DECLARE_RWSEM(snd_pcm_link_rwsem);
89
90 /* A writer in the rwsem may block readers even while it is waiting in the
91 * queue, and this may lead to a deadlock when the code path takes the read
92 * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
93 * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
94 * spin until it gets the lock.
95 */
96 static inline void down_write_nonblock(struct rw_semaphore *lock)
97 {
98 while (!down_write_trylock(lock))
99 cond_resched();
100 }
101
102 /**
103 * snd_pcm_stream_lock - Lock the PCM stream
104 * @substream: PCM substream
105 *
106 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
107 * flag of the given substream. It also takes the global link rwlock
108 * (or rwsem) to avoid races with linked streams.
109 */
110 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
111 {
112 if (substream->pcm->nonatomic) {
113 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
114 mutex_lock(&substream->self_group.mutex);
115 } else {
116 read_lock(&snd_pcm_link_rwlock);
117 spin_lock(&substream->self_group.lock);
118 }
119 }
120 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
121
122 /**
123 * snd_pcm_stream_unlock - Unlock the PCM stream
124 * @substream: PCM substream
125 *
126 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
127 */
128 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
129 {
130 if (substream->pcm->nonatomic) {
131 mutex_unlock(&substream->self_group.mutex);
132 up_read(&snd_pcm_link_rwsem);
133 } else {
134 spin_unlock(&substream->self_group.lock);
135 read_unlock(&snd_pcm_link_rwlock);
136 }
137 }
138 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
139
140 /**
141 * snd_pcm_stream_lock_irq - Lock the PCM stream
142 * @substream: PCM substream
143 *
144 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
145 * IRQ (only when nonatomic is false). In the nonatomic case, this is
146 * identical to snd_pcm_stream_lock().
147 */
148 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
149 {
150 if (!substream->pcm->nonatomic)
151 local_irq_disable();
152 snd_pcm_stream_lock(substream);
153 }
154 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
155
156 /**
157 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
158 * @substream: PCM substream
159 *
160 * This is a counter-part of snd_pcm_stream_lock_irq().
161 */
162 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
163 {
164 snd_pcm_stream_unlock(substream);
165 if (!substream->pcm->nonatomic)
166 local_irq_enable();
167 }
168 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
169
170 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
171 {
172 unsigned long flags = 0;
173 if (!substream->pcm->nonatomic)
174 local_irq_save(flags);
175 snd_pcm_stream_lock(substream);
176 return flags;
177 }
178 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
179
180 /**
181 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
182 * @substream: PCM substream
183 * @flags: irq flags
184 *
185 * This is a counter-part of snd_pcm_stream_lock_irqsave().
186 */
187 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
188 unsigned long flags)
189 {
190 snd_pcm_stream_unlock(substream);
191 if (!substream->pcm->nonatomic)
192 local_irq_restore(flags);
193 }
194 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
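
/*
 * A rough usage sketch for the helpers above: a driver that touches the
 * runtime outside of its PCM callbacks typically brackets the access with
 * the irqsave pair from <sound/pcm.h>, e.g.
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	... inspect or update substream->runtime here ...
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 */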
195
196 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
197 {
198 struct snd_pcm *pcm = substream->pcm;
199 struct snd_pcm_str *pstr = substream->pstr;
200
201 memset(info, 0, sizeof(*info));
202 info->card = pcm->card->number;
203 info->device = pcm->device;
204 info->stream = substream->stream;
205 info->subdevice = substream->number;
206 strlcpy(info->id, pcm->id, sizeof(info->id));
207 strlcpy(info->name, pcm->name, sizeof(info->name));
208 info->dev_class = pcm->dev_class;
209 info->dev_subclass = pcm->dev_subclass;
210 info->subdevices_count = pstr->substream_count;
211 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
212 strlcpy(info->subname, substream->name, sizeof(info->subname));
213
214 return 0;
215 }
216
217 int snd_pcm_info_user(struct snd_pcm_substream *substream,
218 struct snd_pcm_info __user * _info)
219 {
220 struct snd_pcm_info *info;
221 int err;
222
223 info = kmalloc(sizeof(*info), GFP_KERNEL);
224 if (! info)
225 return -ENOMEM;
226 err = snd_pcm_info(substream, info);
227 if (err >= 0) {
228 if (copy_to_user(_info, info, sizeof(*info)))
229 err = -EFAULT;
230 }
231 kfree(info);
232 return err;
233 }
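
/*
 * User space reaches snd_pcm_info_user() through the SNDRV_PCM_IOCTL_INFO
 * ioctl; a minimal caller-side sketch:
 *
 *	struct snd_pcm_info info;
 *
 *	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_INFO, &info) == 0)
 *		printf("%s: %u subdevice(s) available\n",
 *		       info.name, info.subdevices_avail);
 */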
234
235 static bool hw_support_mmap(struct snd_pcm_substream *substream)
236 {
237 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
238 return false;
239 /* architecture supports dma_mmap_coherent()? */
240 #if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
241 if (!substream->ops->mmap &&
242 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
243 return false;
244 #endif
245 return true;
246 }
247
248 static int constrain_mask_params(struct snd_pcm_substream *substream,
249 struct snd_pcm_hw_params *params)
250 {
251 struct snd_pcm_hw_constraints *constrs =
252 &substream->runtime->hw_constraints;
253 struct snd_mask *m;
254 unsigned int k;
255 struct snd_mask old_mask;
256 int changed;
257
258 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
259 m = hw_param_mask(params, k);
260 if (snd_mask_empty(m))
261 return -EINVAL;
262
263 /* The caller did not request this parameter to be changed. */
264 if (!(params->rmask & (1 << k)))
265 continue;
266
267 if (trace_hw_mask_param_enabled())
268 old_mask = *m;
269
270 changed = snd_mask_refine(m, constrs_mask(constrs, k));
271 if (changed < 0)
272 return changed;
273 if (changed == 0)
274 continue;
275
276 /* Set corresponding flag so that the caller gets it. */
277 trace_hw_mask_param(substream, k, 0, &old_mask, m);
278 params->cmask |= 1 << k;
279 }
280
281 return 0;
282 }
283
284 static int constrain_interval_params(struct snd_pcm_substream *substream,
285 struct snd_pcm_hw_params *params)
286 {
287 struct snd_pcm_hw_constraints *constrs =
288 &substream->runtime->hw_constraints;
289 struct snd_interval *i;
290 unsigned int k;
291 struct snd_interval old_interval;
292 int changed;
293
294 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
295 i = hw_param_interval(params, k);
296 if (snd_interval_empty(i))
297 return -EINVAL;
298
299 /* The caller did not request this parameter to be changed. */
300 if (!(params->rmask & (1 << k)))
301 continue;
302
303 if (trace_hw_interval_param_enabled())
304 old_interval = *i;
305
306 changed = snd_interval_refine(i, constrs_interval(constrs, k));
307 if (changed < 0)
308 return changed;
309 if (changed == 0)
310 continue;
311
312 /* Set corresponding flag so that the caller gets it. */
313 trace_hw_interval_param(substream, k, 0, &old_interval, i);
314 params->cmask |= 1 << k;
315 }
316
317 return 0;
318 }
319
320 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
321 struct snd_pcm_hw_params *params)
322 {
323 struct snd_pcm_hw_constraints *constrs =
324 &substream->runtime->hw_constraints;
325 unsigned int k;
326 unsigned int rstamps[constrs->rules_num];
327 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
328 unsigned int stamp;
329 struct snd_pcm_hw_rule *r;
330 unsigned int d;
331 struct snd_mask old_mask;
332 struct snd_interval old_interval;
333 bool again;
334 int changed;
335
336 /*
337 * Each application of a rule has its own sequence number.
338 *
339 * Each member of the 'rstamps' array holds the sequence number of
340 * the most recent application of the corresponding rule.
341 */
342 for (k = 0; k < constrs->rules_num; k++)
343 rstamps[k] = 0;
344
345 /*
346 * Each member of the 'vstamps' array holds the sequence number of the
347 * most recent rule application in which the corresponding parameter was
348 * changed.
349 *
350 * In the initial state, the elements corresponding to parameters requested
351 * by the caller are 1. For unrequested parameters, the corresponding
352 * members are 0, so that those parameters are never changed.
353 */
354 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
355 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
356
357 /* Due to the above design, actual sequence number starts at 2. */
358 stamp = 2;
359 retry:
360 /* Apply all rules in order. */
361 again = false;
362 for (k = 0; k < constrs->rules_num; k++) {
363 r = &constrs->rules[k];
364
365 /*
366 * Check the condition bits of this rule. When the rule has
367 * condition bits, it is skipped unless the caller set the
368 * matching bits in params->flags. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
369 * is an example of such a condition bit.
370 */
371 if (r->cond && !(r->cond & params->flags))
372 continue;
373
374 /*
375 * The 'deps' array includes at most three dependencies
376 * on SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
377 * member of the array is a sentinel and must be a
378 * negative value.
379 *
380 * The rule is processed in this pass only when at least one of its
381 * dependent parameters was changed by a more recent application of
382 * another rule.
383 */
384 for (d = 0; r->deps[d] >= 0; d++) {
385 if (vstamps[r->deps[d]] > rstamps[k])
386 break;
387 }
388 if (r->deps[d] < 0)
389 continue;
390
391 if (trace_hw_mask_param_enabled()) {
392 if (hw_is_mask(r->var))
393 old_mask = *hw_param_mask(params, r->var);
394 }
395 if (trace_hw_interval_param_enabled()) {
396 if (hw_is_interval(r->var))
397 old_interval = *hw_param_interval(params, r->var);
398 }
399
400 changed = r->func(params, r);
401 if (changed < 0)
402 return changed;
403
404 /*
405 * When the parameter was changed, notify the caller via the
406 * corresponding bit in cmask, then prepare for the next
407 * iteration.
408 */
409 if (changed && r->var >= 0) {
410 if (hw_is_mask(r->var)) {
411 trace_hw_mask_param(substream, r->var,
412 k + 1, &old_mask,
413 hw_param_mask(params, r->var));
414 }
415 if (hw_is_interval(r->var)) {
416 trace_hw_interval_param(substream, r->var,
417 k + 1, &old_interval,
418 hw_param_interval(params, r->var));
419 }
420
421 params->cmask |= (1 << r->var);
422 vstamps[r->var] = stamp;
423 again = true;
424 }
425
426 rstamps[k] = stamp++;
427 }
428
429 /* Iterate to evaluate all rules till no parameters are changed. */
430 if (again)
431 goto retry;
432
433 return 0;
434 }
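
/*
 * A rough sketch of the driver side of the rule engine above: constraints
 * are installed from the driver's open callback with snd_pcm_hw_rule_add();
 * the rule function and its limits below are made up for illustration.
 *
 *	static int hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					    struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c = hw_param_interval(params,
 *				SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r = hw_param_interval_c(params,
 *				SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = {
 *			.min = 1, .max = r->max > 48000 ? 2 : 8, .integer = 1,
 *		};
 *
 *		return snd_interval_refine(c, &t);
 *	}
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *			    hw_rule_channels_by_rate, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */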
435
436 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
437 struct snd_pcm_hw_params *params)
438 {
439 const struct snd_interval *i;
440 const struct snd_mask *m;
441 int err;
442
443 if (!params->msbits) {
444 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
445 if (snd_interval_single(i))
446 params->msbits = snd_interval_value(i);
447 }
448
449 if (!params->rate_den) {
450 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
451 if (snd_interval_single(i)) {
452 params->rate_num = snd_interval_value(i);
453 params->rate_den = 1;
454 }
455 }
456
457 if (!params->fifo_size) {
458 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
459 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
460 if (snd_mask_single(m) && snd_interval_single(i)) {
461 err = substream->ops->ioctl(substream,
462 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
463 if (err < 0)
464 return err;
465 }
466 }
467
468 if (!params->info) {
469 params->info = substream->runtime->hw.info;
470 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
471 SNDRV_PCM_INFO_DRAIN_TRIGGER);
472 if (!hw_support_mmap(substream))
473 params->info &= ~(SNDRV_PCM_INFO_MMAP |
474 SNDRV_PCM_INFO_MMAP_VALID);
475 }
476
477 return 0;
478 }
479
480 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
481 struct snd_pcm_hw_params *params)
482 {
483 int err;
484
485 params->info = 0;
486 params->fifo_size = 0;
487 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
488 params->msbits = 0;
489 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
490 params->rate_num = 0;
491 params->rate_den = 0;
492 }
493
494 err = constrain_mask_params(substream, params);
495 if (err < 0)
496 return err;
497
498 err = constrain_interval_params(substream, params);
499 if (err < 0)
500 return err;
501
502 err = constrain_params_by_rules(substream, params);
503 if (err < 0)
504 return err;
505
506 params->rmask = 0;
507
508 return 0;
509 }
510 EXPORT_SYMBOL(snd_pcm_hw_refine);
511
512 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
513 struct snd_pcm_hw_params __user * _params)
514 {
515 struct snd_pcm_hw_params *params;
516 int err;
517
518 params = memdup_user(_params, sizeof(*params));
519 if (IS_ERR(params))
520 return PTR_ERR(params);
521
522 err = snd_pcm_hw_refine(substream, params);
523 if (err < 0)
524 goto end;
525
526 err = fixup_unreferenced_params(substream, params);
527 if (err < 0)
528 goto end;
529
530 if (copy_to_user(_params, params, sizeof(*params)))
531 err = -EFAULT;
532 end:
533 kfree(params);
534 return err;
535 }
536
537 static int period_to_usecs(struct snd_pcm_runtime *runtime)
538 {
539 int usecs;
540
541 if (! runtime->rate)
542 return -1; /* invalid */
543
544 /* take 75% of period time as the deadline */
545 usecs = (750000 / runtime->rate) * runtime->period_size;
546 usecs += ((750000 % runtime->rate) * runtime->period_size) /
547 runtime->rate;
548
549 return usecs;
550 }
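
/*
 * For example, at 48000 Hz with a period of 1024 frames this gives
 * (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 * = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000 usecs,
 * i.e. 75% of the ~21.3 ms period time.
 */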
551
552 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
553 {
554 snd_pcm_stream_lock_irq(substream);
555 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
556 substream->runtime->status->state = state;
557 snd_pcm_stream_unlock_irq(substream);
558 }
559
560 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
561 int event)
562 {
563 #ifdef CONFIG_SND_PCM_TIMER
564 if (substream->timer)
565 snd_timer_notify(substream->timer, event,
566 &substream->runtime->trigger_tstamp);
567 #endif
568 }
569
570 /**
571 * snd_pcm_hw_params_choose - choose a configuration defined by @params
572 * @pcm: PCM instance
573 * @params: the hw_params instance
574 *
575 * Choose one configuration from the configuration space defined by @params.
576 * The chosen configuration is obtained by fixing, in this order:
577 * first access, first format, first subformat, min channels,
578 * min rate, min period time, max buffer size, min tick time
579 *
580 * Return: Zero if successful, or a negative error code on failure.
581 */
582 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
583 struct snd_pcm_hw_params *params)
584 {
585 static const int vars[] = {
586 SNDRV_PCM_HW_PARAM_ACCESS,
587 SNDRV_PCM_HW_PARAM_FORMAT,
588 SNDRV_PCM_HW_PARAM_SUBFORMAT,
589 SNDRV_PCM_HW_PARAM_CHANNELS,
590 SNDRV_PCM_HW_PARAM_RATE,
591 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
592 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
593 SNDRV_PCM_HW_PARAM_TICK_TIME,
594 -1
595 };
596 const int *v;
597 struct snd_mask old_mask;
598 struct snd_interval old_interval;
599 int changed;
600
601 for (v = vars; *v != -1; v++) {
602 /* Keep old parameter to trace. */
603 if (trace_hw_mask_param_enabled()) {
604 if (hw_is_mask(*v))
605 old_mask = *hw_param_mask(params, *v);
606 }
607 if (trace_hw_interval_param_enabled()) {
608 if (hw_is_interval(*v))
609 old_interval = *hw_param_interval(params, *v);
610 }
611 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
612 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
613 else
614 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
615 if (snd_BUG_ON(changed < 0))
616 return changed;
617 if (changed == 0)
618 continue;
619
620 /* Trace the changed parameter. */
621 if (hw_is_mask(*v)) {
622 trace_hw_mask_param(pcm, *v, 0, &old_mask,
623 hw_param_mask(params, *v));
624 }
625 if (hw_is_interval(*v)) {
626 trace_hw_interval_param(pcm, *v, 0, &old_interval,
627 hw_param_interval(params, *v));
628 }
629 }
630
631 return 0;
632 }
633
634 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
635 struct snd_pcm_hw_params *params)
636 {
637 struct snd_pcm_runtime *runtime;
638 int err, usecs;
639 unsigned int bits;
640 snd_pcm_uframes_t frames;
641
642 if (PCM_RUNTIME_CHECK(substream))
643 return -ENXIO;
644 runtime = substream->runtime;
645 snd_pcm_stream_lock_irq(substream);
646 switch (runtime->status->state) {
647 case SNDRV_PCM_STATE_OPEN:
648 case SNDRV_PCM_STATE_SETUP:
649 case SNDRV_PCM_STATE_PREPARED:
650 break;
651 default:
652 snd_pcm_stream_unlock_irq(substream);
653 return -EBADFD;
654 }
655 snd_pcm_stream_unlock_irq(substream);
656 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
657 if (!substream->oss.oss)
658 #endif
659 if (atomic_read(&substream->mmap_count))
660 return -EBADFD;
661
662 params->rmask = ~0U;
663 err = snd_pcm_hw_refine(substream, params);
664 if (err < 0)
665 goto _error;
666
667 err = snd_pcm_hw_params_choose(substream, params);
668 if (err < 0)
669 goto _error;
670
671 err = fixup_unreferenced_params(substream, params);
672 if (err < 0)
673 goto _error;
674
675 if (substream->ops->hw_params != NULL) {
676 err = substream->ops->hw_params(substream, params);
677 if (err < 0)
678 goto _error;
679 }
680
681 runtime->access = params_access(params);
682 runtime->format = params_format(params);
683 runtime->subformat = params_subformat(params);
684 runtime->channels = params_channels(params);
685 runtime->rate = params_rate(params);
686 runtime->period_size = params_period_size(params);
687 runtime->periods = params_periods(params);
688 runtime->buffer_size = params_buffer_size(params);
689 runtime->info = params->info;
690 runtime->rate_num = params->rate_num;
691 runtime->rate_den = params->rate_den;
692 runtime->no_period_wakeup =
693 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
694 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
695
696 bits = snd_pcm_format_physical_width(runtime->format);
697 runtime->sample_bits = bits;
698 bits *= runtime->channels;
699 runtime->frame_bits = bits;
700 frames = 1;
701 while (bits % 8 != 0) {
702 bits *= 2;
703 frames *= 2;
704 }
705 runtime->byte_align = bits / 8;
706 runtime->min_align = frames;
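
/*
 * For example, a 4-bit IMA ADPCM mono stream has frame_bits = 4, so the
 * loop above doubles to bits = 8, frames = 2, giving byte_align = 1 and
 * min_align = 2 frames; 16-bit stereo stays at byte_align = 4, min_align = 1.
 */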
707
708 /* Default sw params */
709 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
710 runtime->period_step = 1;
711 runtime->control->avail_min = runtime->period_size;
712 runtime->start_threshold = 1;
713 runtime->stop_threshold = runtime->buffer_size;
714 runtime->silence_threshold = 0;
715 runtime->silence_size = 0;
716 runtime->boundary = runtime->buffer_size;
717 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
718 runtime->boundary *= 2;
719
720 snd_pcm_timer_resolution_change(substream);
721 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
722
723 if (pm_qos_request_active(&substream->latency_pm_qos_req))
724 pm_qos_remove_request(&substream->latency_pm_qos_req);
725 if ((usecs = period_to_usecs(runtime)) >= 0)
726 pm_qos_add_request(&substream->latency_pm_qos_req,
727 PM_QOS_CPU_DMA_LATENCY, usecs);
728 return 0;
729 _error:
730 /* the hardware might be unusable from this point on,
731 so we force the application to retry setting
732 the correct hardware parameters */
733 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
734 if (substream->ops->hw_free != NULL)
735 substream->ops->hw_free(substream);
736 return err;
737 }
738
739 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
740 struct snd_pcm_hw_params __user * _params)
741 {
742 struct snd_pcm_hw_params *params;
743 int err;
744
745 params = memdup_user(_params, sizeof(*params));
746 if (IS_ERR(params))
747 return PTR_ERR(params);
748
749 err = snd_pcm_hw_params(substream, params);
750 if (err < 0)
751 goto end;
752
753 if (copy_to_user(_params, params, sizeof(*params)))
754 err = -EFAULT;
755 end:
756 kfree(params);
757 return err;
758 }
759
760 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
761 {
762 struct snd_pcm_runtime *runtime;
763 int result = 0;
764
765 if (PCM_RUNTIME_CHECK(substream))
766 return -ENXIO;
767 runtime = substream->runtime;
768 snd_pcm_stream_lock_irq(substream);
769 switch (runtime->status->state) {
770 case SNDRV_PCM_STATE_SETUP:
771 case SNDRV_PCM_STATE_PREPARED:
772 break;
773 default:
774 snd_pcm_stream_unlock_irq(substream);
775 return -EBADFD;
776 }
777 snd_pcm_stream_unlock_irq(substream);
778 if (atomic_read(&substream->mmap_count))
779 return -EBADFD;
780 if (substream->ops->hw_free)
781 result = substream->ops->hw_free(substream);
782 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
783 pm_qos_remove_request(&substream->latency_pm_qos_req);
784 return result;
785 }
786
787 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
788 struct snd_pcm_sw_params *params)
789 {
790 struct snd_pcm_runtime *runtime;
791 int err;
792
793 if (PCM_RUNTIME_CHECK(substream))
794 return -ENXIO;
795 runtime = substream->runtime;
796 snd_pcm_stream_lock_irq(substream);
797 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
798 snd_pcm_stream_unlock_irq(substream);
799 return -EBADFD;
800 }
801 snd_pcm_stream_unlock_irq(substream);
802
803 if (params->tstamp_mode < 0 ||
804 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
805 return -EINVAL;
806 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
807 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
808 return -EINVAL;
809 if (params->avail_min == 0)
810 return -EINVAL;
811 if (params->silence_size >= runtime->boundary) {
812 if (params->silence_threshold != 0)
813 return -EINVAL;
814 } else {
815 if (params->silence_size > params->silence_threshold)
816 return -EINVAL;
817 if (params->silence_threshold > runtime->buffer_size)
818 return -EINVAL;
819 }
820 err = 0;
821 snd_pcm_stream_lock_irq(substream);
822 runtime->tstamp_mode = params->tstamp_mode;
823 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
824 runtime->tstamp_type = params->tstamp_type;
825 runtime->period_step = params->period_step;
826 runtime->control->avail_min = params->avail_min;
827 runtime->start_threshold = params->start_threshold;
828 runtime->stop_threshold = params->stop_threshold;
829 runtime->silence_threshold = params->silence_threshold;
830 runtime->silence_size = params->silence_size;
831 params->boundary = runtime->boundary;
832 if (snd_pcm_running(substream)) {
833 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
834 runtime->silence_size > 0)
835 snd_pcm_playback_silence(substream, ULONG_MAX);
836 err = snd_pcm_update_state(substream, runtime);
837 }
838 snd_pcm_stream_unlock_irq(substream);
839 return err;
840 }
841
842 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
843 struct snd_pcm_sw_params __user * _params)
844 {
845 struct snd_pcm_sw_params params;
846 int err;
847 if (copy_from_user(&params, _params, sizeof(params)))
848 return -EFAULT;
849 err = snd_pcm_sw_params(substream, &params);
850 if (copy_to_user(_params, &params, sizeof(params)))
851 return -EFAULT;
852 return err;
853 }
854
855 int snd_pcm_status(struct snd_pcm_substream *substream,
856 struct snd_pcm_status *status)
857 {
858 struct snd_pcm_runtime *runtime = substream->runtime;
859
860 snd_pcm_stream_lock_irq(substream);
861
862 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
863 &runtime->audio_tstamp_config);
864
865 /* backwards compatible behavior */
866 if (runtime->audio_tstamp_config.type_requested ==
867 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
868 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
869 runtime->audio_tstamp_config.type_requested =
870 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
871 else
872 runtime->audio_tstamp_config.type_requested =
873 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
874 runtime->audio_tstamp_report.valid = 0;
875 } else
876 runtime->audio_tstamp_report.valid = 1;
877
878 status->state = runtime->status->state;
879 status->suspended_state = runtime->status->suspended_state;
880 if (status->state == SNDRV_PCM_STATE_OPEN)
881 goto _end;
882 status->trigger_tstamp = runtime->trigger_tstamp;
883 if (snd_pcm_running(substream)) {
884 snd_pcm_update_hw_ptr(substream);
885 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
886 status->tstamp = runtime->status->tstamp;
887 status->driver_tstamp = runtime->driver_tstamp;
888 status->audio_tstamp =
889 runtime->status->audio_tstamp;
890 if (runtime->audio_tstamp_report.valid == 1)
891 /* backwards compatibility, no report provided in COMPAT mode */
892 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
893 &status->audio_tstamp_accuracy,
894 &runtime->audio_tstamp_report);
895
896 goto _tstamp_end;
897 }
898 } else {
899 /* get tstamp only in fallback mode and only if enabled */
900 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
901 snd_pcm_gettime(runtime, &status->tstamp);
902 }
903 _tstamp_end:
904 status->appl_ptr = runtime->control->appl_ptr;
905 status->hw_ptr = runtime->status->hw_ptr;
906 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
907 status->avail = snd_pcm_playback_avail(runtime);
908 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
909 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
910 status->delay = runtime->buffer_size - status->avail;
911 status->delay += runtime->delay;
912 } else
913 status->delay = 0;
914 } else {
915 status->avail = snd_pcm_capture_avail(runtime);
916 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
917 status->delay = status->avail + runtime->delay;
918 else
919 status->delay = 0;
920 }
921 status->avail_max = runtime->avail_max;
922 status->overrange = runtime->overrange;
923 runtime->avail_max = 0;
924 runtime->overrange = 0;
925 _end:
926 snd_pcm_stream_unlock_irq(substream);
927 return 0;
928 }
929
930 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
931 struct snd_pcm_status __user * _status,
932 bool ext)
933 {
934 struct snd_pcm_status status;
935 int res;
936
937 memset(&status, 0, sizeof(status));
938 /*
939 * with extension, parameters are read/write,
940 * get audio_tstamp_data from user,
941 * ignore rest of status structure
942 */
943 if (ext && get_user(status.audio_tstamp_data,
944 (u32 __user *)(&_status->audio_tstamp_data)))
945 return -EFAULT;
946 res = snd_pcm_status(substream, &status);
947 if (res < 0)
948 return res;
949 if (copy_to_user(_status, &status, sizeof(status)))
950 return -EFAULT;
951 return 0;
952 }
953
954 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
955 struct snd_pcm_channel_info * info)
956 {
957 struct snd_pcm_runtime *runtime;
958 unsigned int channel;
959
960 channel = info->channel;
961 runtime = substream->runtime;
962 snd_pcm_stream_lock_irq(substream);
963 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
964 snd_pcm_stream_unlock_irq(substream);
965 return -EBADFD;
966 }
967 snd_pcm_stream_unlock_irq(substream);
968 if (channel >= runtime->channels)
969 return -EINVAL;
970 memset(info, 0, sizeof(*info));
971 info->channel = channel;
972 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
973 }
974
975 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
976 struct snd_pcm_channel_info __user * _info)
977 {
978 struct snd_pcm_channel_info info;
979 int res;
980
981 if (copy_from_user(&info, _info, sizeof(info)))
982 return -EFAULT;
983 res = snd_pcm_channel_info(substream, &info);
984 if (res < 0)
985 return res;
986 if (copy_to_user(_info, &info, sizeof(info)))
987 return -EFAULT;
988 return 0;
989 }
990
991 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
992 {
993 struct snd_pcm_runtime *runtime = substream->runtime;
994 if (runtime->trigger_master == NULL)
995 return;
996 if (runtime->trigger_master == substream) {
997 if (!runtime->trigger_tstamp_latched)
998 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
999 } else {
1000 snd_pcm_trigger_tstamp(runtime->trigger_master);
1001 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1002 }
1003 runtime->trigger_master = NULL;
1004 }
1005
1006 struct action_ops {
1007 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1008 int (*do_action)(struct snd_pcm_substream *substream, int state);
1009 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1010 void (*post_action)(struct snd_pcm_substream *substream, int state);
1011 };
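
/*
 * Each PCM state transition below is described by one of these op tables:
 * pre_action() validates the current state, do_action() typically invokes
 * the driver's trigger/ioctl callback, undo_action() rolls back when a
 * linked substream fails, and post_action() commits the new state.
 * snd_pcm_action_start further down is a representative instance.
 */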
1012
1013 /*
1014 * this function is the core handler for linked streams
1015 * Note: the stream state might be changed even on failure
1016 * Note2: call with the calling stream's lock + the link lock held
1017 */
1018 static int snd_pcm_action_group(const struct action_ops *ops,
1019 struct snd_pcm_substream *substream,
1020 int state, int do_lock)
1021 {
1022 struct snd_pcm_substream *s = NULL;
1023 struct snd_pcm_substream *s1;
1024 int res = 0, depth = 1;
1025
1026 snd_pcm_group_for_each_entry(s, substream) {
1027 if (do_lock && s != substream) {
1028 if (s->pcm->nonatomic)
1029 mutex_lock_nested(&s->self_group.mutex, depth);
1030 else
1031 spin_lock_nested(&s->self_group.lock, depth);
1032 depth++;
1033 }
1034 res = ops->pre_action(s, state);
1035 if (res < 0)
1036 goto _unlock;
1037 }
1038 snd_pcm_group_for_each_entry(s, substream) {
1039 res = ops->do_action(s, state);
1040 if (res < 0) {
1041 if (ops->undo_action) {
1042 snd_pcm_group_for_each_entry(s1, substream) {
1043 if (s1 == s) /* failed stream */
1044 break;
1045 ops->undo_action(s1, state);
1046 }
1047 }
1048 s = NULL; /* unlock all */
1049 goto _unlock;
1050 }
1051 }
1052 snd_pcm_group_for_each_entry(s, substream) {
1053 ops->post_action(s, state);
1054 }
1055 _unlock:
1056 if (do_lock) {
1057 /* unlock streams */
1058 snd_pcm_group_for_each_entry(s1, substream) {
1059 if (s1 != substream) {
1060 if (s1->pcm->nonatomic)
1061 mutex_unlock(&s1->self_group.mutex);
1062 else
1063 spin_unlock(&s1->self_group.lock);
1064 }
1065 if (s1 == s) /* end */
1066 break;
1067 }
1068 }
1069 return res;
1070 }
1071
1072 /*
1073 * Note: call with stream lock
1074 */
1075 static int snd_pcm_action_single(const struct action_ops *ops,
1076 struct snd_pcm_substream *substream,
1077 int state)
1078 {
1079 int res;
1080
1081 res = ops->pre_action(substream, state);
1082 if (res < 0)
1083 return res;
1084 res = ops->do_action(substream, state);
1085 if (res == 0)
1086 ops->post_action(substream, state);
1087 else if (ops->undo_action)
1088 ops->undo_action(substream, state);
1089 return res;
1090 }
1091
1092 /*
1093 * Note: call with stream lock
1094 */
1095 static int snd_pcm_action(const struct action_ops *ops,
1096 struct snd_pcm_substream *substream,
1097 int state)
1098 {
1099 int res;
1100
1101 if (!snd_pcm_stream_linked(substream))
1102 return snd_pcm_action_single(ops, substream, state);
1103
1104 if (substream->pcm->nonatomic) {
1105 if (!mutex_trylock(&substream->group->mutex)) {
1106 mutex_unlock(&substream->self_group.mutex);
1107 mutex_lock(&substream->group->mutex);
1108 mutex_lock(&substream->self_group.mutex);
1109 }
1110 res = snd_pcm_action_group(ops, substream, state, 1);
1111 mutex_unlock(&substream->group->mutex);
1112 } else {
1113 if (!spin_trylock(&substream->group->lock)) {
1114 spin_unlock(&substream->self_group.lock);
1115 spin_lock(&substream->group->lock);
1116 spin_lock(&substream->self_group.lock);
1117 }
1118 res = snd_pcm_action_group(ops, substream, state, 1);
1119 spin_unlock(&substream->group->lock);
1120 }
1121 return res;
1122 }
1123
1124 /*
1125 * Note: don't use any locks before
1126 */
1127 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1128 struct snd_pcm_substream *substream,
1129 int state)
1130 {
1131 int res;
1132
1133 snd_pcm_stream_lock_irq(substream);
1134 res = snd_pcm_action(ops, substream, state);
1135 snd_pcm_stream_unlock_irq(substream);
1136 return res;
1137 }
1138
1139 /* Like snd_pcm_action(), but to be called without the stream lock held;
1140 * it takes the link rwsem by itself instead. */
1141 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1142 struct snd_pcm_substream *substream,
1143 int state)
1144 {
1145 int res;
1146
1147 down_read(&snd_pcm_link_rwsem);
1148 if (snd_pcm_stream_linked(substream))
1149 res = snd_pcm_action_group(ops, substream, state, 0);
1150 else
1151 res = snd_pcm_action_single(ops, substream, state);
1152 up_read(&snd_pcm_link_rwsem);
1153 return res;
1154 }
1155
1156 /*
1157 * start callbacks
1158 */
1159 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1160 {
1161 struct snd_pcm_runtime *runtime = substream->runtime;
1162 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1163 return -EBADFD;
1164 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1165 !snd_pcm_playback_data(substream))
1166 return -EPIPE;
1167 runtime->trigger_tstamp_latched = false;
1168 runtime->trigger_master = substream;
1169 return 0;
1170 }
1171
1172 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1173 {
1174 if (substream->runtime->trigger_master != substream)
1175 return 0;
1176 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1177 }
1178
1179 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1180 {
1181 if (substream->runtime->trigger_master == substream)
1182 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1183 }
1184
1185 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1186 {
1187 struct snd_pcm_runtime *runtime = substream->runtime;
1188 snd_pcm_trigger_tstamp(substream);
1189 runtime->hw_ptr_jiffies = jiffies;
1190 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1191 runtime->rate;
1192 runtime->status->state = state;
1193 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1194 runtime->silence_size > 0)
1195 snd_pcm_playback_silence(substream, ULONG_MAX);
1196 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1197 }
1198
1199 static const struct action_ops snd_pcm_action_start = {
1200 .pre_action = snd_pcm_pre_start,
1201 .do_action = snd_pcm_do_start,
1202 .undo_action = snd_pcm_undo_start,
1203 .post_action = snd_pcm_post_start
1204 };
1205
1206 /**
1207 * snd_pcm_start - start all linked streams
1208 * @substream: the PCM substream instance
1209 *
1210 * Return: Zero if successful, or a negative error code.
1211 * The stream lock must be acquired before calling this function.
1212 */
1213 int snd_pcm_start(struct snd_pcm_substream *substream)
1214 {
1215 return snd_pcm_action(&snd_pcm_action_start, substream,
1216 SNDRV_PCM_STATE_RUNNING);
1217 }
1218
1219 /* take the stream lock and start the streams */
1220 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1221 {
1222 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1223 SNDRV_PCM_STATE_RUNNING);
1224 }
1225
1226 /*
1227 * stop callbacks
1228 */
1229 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1230 {
1231 struct snd_pcm_runtime *runtime = substream->runtime;
1232 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1233 return -EBADFD;
1234 runtime->trigger_master = substream;
1235 return 0;
1236 }
1237
1238 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1239 {
1240 if (substream->runtime->trigger_master == substream &&
1241 snd_pcm_running(substream))
1242 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1243 return 0; /* unconditionally stop all substreams */
1244 }
1245
1246 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1247 {
1248 struct snd_pcm_runtime *runtime = substream->runtime;
1249 if (runtime->status->state != state) {
1250 snd_pcm_trigger_tstamp(substream);
1251 runtime->status->state = state;
1252 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1253 }
1254 wake_up(&runtime->sleep);
1255 wake_up(&runtime->tsleep);
1256 }
1257
1258 static const struct action_ops snd_pcm_action_stop = {
1259 .pre_action = snd_pcm_pre_stop,
1260 .do_action = snd_pcm_do_stop,
1261 .post_action = snd_pcm_post_stop
1262 };
1263
1264 /**
1265 * snd_pcm_stop - try to stop all running streams in the substream group
1266 * @substream: the PCM substream instance
1267 * @state: PCM state after stopping the stream
1268 *
1269 * The state of each stream is then changed to the given state unconditionally.
1270 *
1271 * Return: Zero if successful, or a negative error code.
1272 */
1273 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1274 {
1275 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1276 }
1277 EXPORT_SYMBOL(snd_pcm_stop);
1278
1279 /**
1280 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1281 * @substream: the PCM substream
1282 *
1283 * After stopping, the state is changed to SETUP.
1284 * Unlike snd_pcm_stop(), this affects only the given stream.
1285 *
1286 * Return: Zero if successful, or a negative error code.
1287 */
1288 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1289 {
1290 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1291 SNDRV_PCM_STATE_SETUP);
1292 }
1293
1294 /**
1295 * snd_pcm_stop_xrun - stop the running streams as XRUN
1296 * @substream: the PCM substream instance
1297 *
1298 * This stops the given running substream (and all linked substreams) as XRUN.
1299 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1300 *
1301 * Return: Zero if successful, or a negative error code.
1302 */
1303 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1304 {
1305 unsigned long flags;
1306 int ret = 0;
1307
1308 snd_pcm_stream_lock_irqsave(substream, flags);
1309 if (snd_pcm_running(substream))
1310 ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1311 snd_pcm_stream_unlock_irqrestore(substream, flags);
1312 return ret;
1313 }
1314 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
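
/*
 * Typical driver-side use, roughly sketched: when an interrupt handler or
 * pointer callback detects that the hardware over- or underran, it reports
 * the condition with
 *
 *	snd_pcm_stop_xrun(substream);
 *
 * which, unlike snd_pcm_stop(), takes the stream lock by itself.
 */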
1315
1316 /*
1317 * pause callbacks
1318 */
1319 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1320 {
1321 struct snd_pcm_runtime *runtime = substream->runtime;
1322 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1323 return -ENOSYS;
1324 if (push) {
1325 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1326 return -EBADFD;
1327 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1328 return -EBADFD;
1329 runtime->trigger_master = substream;
1330 return 0;
1331 }
1332
1333 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1334 {
1335 if (substream->runtime->trigger_master != substream)
1336 return 0;
1337 /* some drivers might use hw_ptr to recover from the pause -
1338 update the hw_ptr now */
1339 if (push)
1340 snd_pcm_update_hw_ptr(substream);
1341 /* The jiffies check in snd_pcm_update_hw_ptr*() is based on a delta
1342 * from the current jiffies; pushing hw_ptr_jiffies far into the past
1343 * gives a large enough delta to effectively skip the check once.
1344 */
1345 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1346 return substream->ops->trigger(substream,
1347 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1348 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1349 }
1350
1351 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1352 {
1353 if (substream->runtime->trigger_master == substream)
1354 substream->ops->trigger(substream,
1355 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1356 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1357 }
1358
1359 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1360 {
1361 struct snd_pcm_runtime *runtime = substream->runtime;
1362 snd_pcm_trigger_tstamp(substream);
1363 if (push) {
1364 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1365 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1366 wake_up(&runtime->sleep);
1367 wake_up(&runtime->tsleep);
1368 } else {
1369 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1370 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1371 }
1372 }
1373
1374 static const struct action_ops snd_pcm_action_pause = {
1375 .pre_action = snd_pcm_pre_pause,
1376 .do_action = snd_pcm_do_pause,
1377 .undo_action = snd_pcm_undo_pause,
1378 .post_action = snd_pcm_post_pause
1379 };
1380
1381 /*
1382 * Push/release the pause for all linked streams.
1383 */
1384 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1385 {
1386 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1387 }
1388
1389 #ifdef CONFIG_PM
1390 /* suspend */
1391
1392 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1393 {
1394 struct snd_pcm_runtime *runtime = substream->runtime;
1395 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1396 return -EBUSY;
1397 runtime->trigger_master = substream;
1398 return 0;
1399 }
1400
1401 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1402 {
1403 struct snd_pcm_runtime *runtime = substream->runtime;
1404 if (runtime->trigger_master != substream)
1405 return 0;
1406 if (! snd_pcm_running(substream))
1407 return 0;
1408 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1409 return 0; /* suspend unconditionally */
1410 }
1411
1412 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1413 {
1414 struct snd_pcm_runtime *runtime = substream->runtime;
1415 snd_pcm_trigger_tstamp(substream);
1416 runtime->status->suspended_state = runtime->status->state;
1417 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1418 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1419 wake_up(&runtime->sleep);
1420 wake_up(&runtime->tsleep);
1421 }
1422
1423 static const struct action_ops snd_pcm_action_suspend = {
1424 .pre_action = snd_pcm_pre_suspend,
1425 .do_action = snd_pcm_do_suspend,
1426 .post_action = snd_pcm_post_suspend
1427 };
1428
1429 /**
1430 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1431 * @substream: the PCM substream
1432 *
1433 * After this call, all streams are changed to SUSPENDED state.
1434 *
1435 * Return: Zero if successful (or @substream is %NULL), or a negative error
1436 * code.
1437 */
1438 int snd_pcm_suspend(struct snd_pcm_substream *substream)
1439 {
1440 int err;
1441 unsigned long flags;
1442
1443 if (! substream)
1444 return 0;
1445
1446 snd_pcm_stream_lock_irqsave(substream, flags);
1447 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1448 snd_pcm_stream_unlock_irqrestore(substream, flags);
1449 return err;
1450 }
1451 EXPORT_SYMBOL(snd_pcm_suspend);
1452
1453 /**
1454 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1455 * @pcm: the PCM instance
1456 *
1457 * After this call, all streams are changed to SUSPENDED state.
1458 *
1459 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1460 */
1461 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1462 {
1463 struct snd_pcm_substream *substream;
1464 int stream, err = 0;
1465
1466 if (! pcm)
1467 return 0;
1468
1469 for (stream = 0; stream < 2; stream++) {
1470 for (substream = pcm->streams[stream].substream;
1471 substream; substream = substream->next) {
1472 /* FIXME: the open/close code should lock this as well */
1473 if (substream->runtime == NULL)
1474 continue;
1475 err = snd_pcm_suspend(substream);
1476 if (err < 0 && err != -EBUSY)
1477 return err;
1478 }
1479 }
1480 return 0;
1481 }
1482 EXPORT_SYMBOL(snd_pcm_suspend_all);
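
/*
 * A rough sketch of the expected caller: a card driver's PM suspend hook
 * (the names below are hypothetical) suspends all of its PCM streams before
 * powering down, e.g.
 *
 *	static int mychip_suspend(struct device *dev)
 *	{
 *		struct mychip *chip = dev_get_drvdata(dev);
 *
 *		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
 *		snd_pcm_suspend_all(chip->pcm);
 *		return 0;
 *	}
 */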
1483
1484 /* resume */
1485
1486 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1487 {
1488 struct snd_pcm_runtime *runtime = substream->runtime;
1489 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1490 return -ENOSYS;
1491 runtime->trigger_master = substream;
1492 return 0;
1493 }
1494
1495 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1496 {
1497 struct snd_pcm_runtime *runtime = substream->runtime;
1498 if (runtime->trigger_master != substream)
1499 return 0;
1500 /* DMA not running previously? */
1501 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1502 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1503 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1504 return 0;
1505 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1506 }
1507
1508 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1509 {
1510 if (substream->runtime->trigger_master == substream &&
1511 snd_pcm_running(substream))
1512 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1513 }
1514
1515 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1516 {
1517 struct snd_pcm_runtime *runtime = substream->runtime;
1518 snd_pcm_trigger_tstamp(substream);
1519 runtime->status->state = runtime->status->suspended_state;
1520 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1521 }
1522
1523 static const struct action_ops snd_pcm_action_resume = {
1524 .pre_action = snd_pcm_pre_resume,
1525 .do_action = snd_pcm_do_resume,
1526 .undo_action = snd_pcm_undo_resume,
1527 .post_action = snd_pcm_post_resume
1528 };
1529
1530 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1531 {
1532 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1533 }
1534
1535 #else
1536
1537 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1538 {
1539 return -ENOSYS;
1540 }
1541
1542 #endif /* CONFIG_PM */
1543
1544 /*
1545 * xrun ioctl
1546 *
1547 * Change the RUNNING stream(s) to XRUN state.
1548 */
1549 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1550 {
1551 struct snd_pcm_runtime *runtime = substream->runtime;
1552 int result;
1553
1554 snd_pcm_stream_lock_irq(substream);
1555 switch (runtime->status->state) {
1556 case SNDRV_PCM_STATE_XRUN:
1557 result = 0; /* already there */
1558 break;
1559 case SNDRV_PCM_STATE_RUNNING:
1560 result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1561 break;
1562 default:
1563 result = -EBADFD;
1564 }
1565 snd_pcm_stream_unlock_irq(substream);
1566 return result;
1567 }
1568
1569 /*
1570 * reset ioctl
1571 */
1572 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1573 {
1574 struct snd_pcm_runtime *runtime = substream->runtime;
1575 switch (runtime->status->state) {
1576 case SNDRV_PCM_STATE_RUNNING:
1577 case SNDRV_PCM_STATE_PREPARED:
1578 case SNDRV_PCM_STATE_PAUSED:
1579 case SNDRV_PCM_STATE_SUSPENDED:
1580 return 0;
1581 default:
1582 return -EBADFD;
1583 }
1584 }
1585
1586 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1587 {
1588 struct snd_pcm_runtime *runtime = substream->runtime;
1589 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1590 if (err < 0)
1591 return err;
1592 runtime->hw_ptr_base = 0;
1593 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1594 runtime->status->hw_ptr % runtime->period_size;
1595 runtime->silence_start = runtime->status->hw_ptr;
1596 runtime->silence_filled = 0;
1597 return 0;
1598 }
1599
1600 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1601 {
1602 struct snd_pcm_runtime *runtime = substream->runtime;
1603 runtime->control->appl_ptr = runtime->status->hw_ptr;
1604 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1605 runtime->silence_size > 0)
1606 snd_pcm_playback_silence(substream, ULONG_MAX);
1607 }
1608
1609 static const struct action_ops snd_pcm_action_reset = {
1610 .pre_action = snd_pcm_pre_reset,
1611 .do_action = snd_pcm_do_reset,
1612 .post_action = snd_pcm_post_reset
1613 };
1614
1615 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1616 {
1617 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1618 }
1619
1620 /*
1621 * prepare ioctl
1622 */
1623 /* we use the second argument for updating f_flags */
1624 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1625 int f_flags)
1626 {
1627 struct snd_pcm_runtime *runtime = substream->runtime;
1628 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1629 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1630 return -EBADFD;
1631 if (snd_pcm_running(substream))
1632 return -EBUSY;
1633 substream->f_flags = f_flags;
1634 return 0;
1635 }
1636
1637 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1638 {
1639 int err;
1640 err = substream->ops->prepare(substream);
1641 if (err < 0)
1642 return err;
1643 return snd_pcm_do_reset(substream, 0);
1644 }
1645
1646 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1647 {
1648 struct snd_pcm_runtime *runtime = substream->runtime;
1649 runtime->control->appl_ptr = runtime->status->hw_ptr;
1650 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1651 }
1652
1653 static const struct action_ops snd_pcm_action_prepare = {
1654 .pre_action = snd_pcm_pre_prepare,
1655 .do_action = snd_pcm_do_prepare,
1656 .post_action = snd_pcm_post_prepare
1657 };
1658
1659 /**
1660 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1661 * @substream: the PCM substream instance
1662 * @file: file to refer f_flags
1663 *
1664 * Return: Zero if successful, or a negative error code.
1665 */
1666 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1667 struct file *file)
1668 {
1669 int f_flags;
1670
1671 if (file)
1672 f_flags = file->f_flags;
1673 else
1674 f_flags = substream->f_flags;
1675
1676 snd_pcm_stream_lock_irq(substream);
1677 switch (substream->runtime->status->state) {
1678 case SNDRV_PCM_STATE_PAUSED:
1679 snd_pcm_pause(substream, 0);
1680 /* fallthru */
1681 case SNDRV_PCM_STATE_SUSPENDED:
1682 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1683 break;
1684 }
1685 snd_pcm_stream_unlock_irq(substream);
1686
1687 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1688 substream, f_flags);
1689 }
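
/*
 * For orientation, the minimal playback sequence driven from user space
 * (via alsa-lib or the raw ioctls) is roughly:
 *
 *	SNDRV_PCM_IOCTL_HW_PARAMS   -> snd_pcm_hw_params()
 *	SNDRV_PCM_IOCTL_SW_PARAMS   -> snd_pcm_sw_params()
 *	SNDRV_PCM_IOCTL_PREPARE     -> snd_pcm_prepare()
 *	write() / SNDRV_PCM_IOCTL_WRITEI_FRAMES to feed data, then
 *	SNDRV_PCM_IOCTL_DRAIN       -> snd_pcm_drain()
 */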
1690
1691 /*
1692 * drain ioctl
1693 */
1694
1695 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1696 {
1697 struct snd_pcm_runtime *runtime = substream->runtime;
1698 switch (runtime->status->state) {
1699 case SNDRV_PCM_STATE_OPEN:
1700 case SNDRV_PCM_STATE_DISCONNECTED:
1701 case SNDRV_PCM_STATE_SUSPENDED:
1702 return -EBADFD;
1703 }
1704 runtime->trigger_master = substream;
1705 return 0;
1706 }
1707
1708 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1709 {
1710 struct snd_pcm_runtime *runtime = substream->runtime;
1711 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1712 switch (runtime->status->state) {
1713 case SNDRV_PCM_STATE_PREPARED:
1714 /* start playback stream if possible */
1715 if (! snd_pcm_playback_empty(substream)) {
1716 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1717 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1718 } else {
1719 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1720 }
1721 break;
1722 case SNDRV_PCM_STATE_RUNNING:
1723 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1724 break;
1725 case SNDRV_PCM_STATE_XRUN:
1726 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1727 break;
1728 default:
1729 break;
1730 }
1731 } else {
1732 /* stop running stream */
1733 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1734 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1735 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1736 snd_pcm_do_stop(substream, new_state);
1737 snd_pcm_post_stop(substream, new_state);
1738 }
1739 }
1740
1741 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1742 runtime->trigger_master == substream &&
1743 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1744 return substream->ops->trigger(substream,
1745 SNDRV_PCM_TRIGGER_DRAIN);
1746
1747 return 0;
1748 }
1749
1750 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1751 {
1752 }
1753
1754 static const struct action_ops snd_pcm_action_drain_init = {
1755 .pre_action = snd_pcm_pre_drain_init,
1756 .do_action = snd_pcm_do_drain_init,
1757 .post_action = snd_pcm_post_drain_init
1758 };
1759
1760 static int snd_pcm_drop(struct snd_pcm_substream *substream);
1761
1762 /*
1763 * Drain the stream(s).
1764 * When the substream is linked, sync until the draining of all playback streams
1765 * is finished.
1766 * After this call, all streams are supposed to be in either SETUP or
1767 * DRAINING (capture only) state.
1768 */
1769 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1770 struct file *file)
1771 {
1772 struct snd_card *card;
1773 struct snd_pcm_runtime *runtime;
1774 struct snd_pcm_substream *s;
1775 wait_queue_entry_t wait;
1776 int result = 0;
1777 int nonblock = 0;
1778
1779 card = substream->pcm->card;
1780 runtime = substream->runtime;
1781
1782 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1783 return -EBADFD;
1784
1785 if (file) {
1786 if (file->f_flags & O_NONBLOCK)
1787 nonblock = 1;
1788 } else if (substream->f_flags & O_NONBLOCK)
1789 nonblock = 1;
1790
1791 down_read(&snd_pcm_link_rwsem);
1792 snd_pcm_stream_lock_irq(substream);
1793 /* resume pause */
1794 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1795 snd_pcm_pause(substream, 0);
1796
1797 /* pre-start/stop - all running streams are changed to DRAINING state */
1798 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1799 if (result < 0)
1800 goto unlock;
1801 /* in non-blocking, we don't wait in ioctl but let caller poll */
1802 if (nonblock) {
1803 result = -EAGAIN;
1804 goto unlock;
1805 }
1806
1807 for (;;) {
1808 long tout;
1809 struct snd_pcm_runtime *to_check;
1810 if (signal_pending(current)) {
1811 result = -ERESTARTSYS;
1812 break;
1813 }
1814 /* find a substream to drain */
1815 to_check = NULL;
1816 snd_pcm_group_for_each_entry(s, substream) {
1817 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1818 continue;
1819 runtime = s->runtime;
1820 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1821 to_check = runtime;
1822 break;
1823 }
1824 }
1825 if (!to_check)
1826 break; /* all drained */
1827 init_waitqueue_entry(&wait, current);
1828 add_wait_queue(&to_check->sleep, &wait);
1829 snd_pcm_stream_unlock_irq(substream);
1830 up_read(&snd_pcm_link_rwsem);
1831 if (runtime->no_period_wakeup)
1832 tout = MAX_SCHEDULE_TIMEOUT;
1833 else {
1834 tout = 10;
1835 if (runtime->rate) {
1836 long t = runtime->period_size * 2 / runtime->rate;
1837 tout = max(t, tout);
1838 }
1839 tout = msecs_to_jiffies(tout * 1000);
1840 }
1841 tout = schedule_timeout_interruptible(tout);
1842 down_read(&snd_pcm_link_rwsem);
1843 snd_pcm_stream_lock_irq(substream);
1844 remove_wait_queue(&to_check->sleep, &wait);
1845 if (card->shutdown) {
1846 result = -ENODEV;
1847 break;
1848 }
1849 if (tout == 0) {
1850 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1851 result = -ESTRPIPE;
1852 else {
1853 dev_dbg(substream->pcm->card->dev,
1854 "playback drain error (DMA or IRQ trouble?)\n");
1855 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1856 result = -EIO;
1857 }
1858 break;
1859 }
1860 }
1861
1862 unlock:
1863 snd_pcm_stream_unlock_irq(substream);
1864 up_read(&snd_pcm_link_rwsem);
1865
1866 return result;
1867 }
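
/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * the blocking path above is entered through the DRAIN ioctl on an
 * already-configured playback handle; "fd" is assumed to be an open
 * /dev/snd/pcmC*D*p descriptor.
 *
 *	if (ioctl(fd, SNDRV_PCM_IOCTL_DRAIN) < 0)
 *		perror("SNDRV_PCM_IOCTL_DRAIN");
 *
 * With O_NONBLOCK the ioctl only starts the draining and fails with EAGAIN;
 * the caller is then expected to poll() until the stream leaves DRAINING.
 */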
1868
1869 /*
1870 * drop ioctl
1871 *
1872 * Immediately put all linked substreams into SETUP state.
1873 */
1874 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1875 {
1876 struct snd_pcm_runtime *runtime;
1877 int result = 0;
1878
1879 if (PCM_RUNTIME_CHECK(substream))
1880 return -ENXIO;
1881 runtime = substream->runtime;
1882
1883 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1884 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1885 return -EBADFD;
1886
1887 snd_pcm_stream_lock_irq(substream);
1888 /* resume pause */
1889 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1890 snd_pcm_pause(substream, 0);
1891
1892 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1893 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1894 snd_pcm_stream_unlock_irq(substream);
1895
1896 return result;
1897 }
1898
1899
1900 static bool is_pcm_file(struct file *file)
1901 {
1902 struct inode *inode = file_inode(file);
1903 unsigned int minor;
1904
1905 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1906 return false;
1907 minor = iminor(inode);
1908 return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
1909 snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1910 }
1911
1912 /*
1913 * PCM link handling
1914 */
1915 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1916 {
1917 int res = 0;
1918 struct snd_pcm_file *pcm_file;
1919 struct snd_pcm_substream *substream1;
1920 struct snd_pcm_group *group;
1921 struct fd f = fdget(fd);
1922
1923 if (!f.file)
1924 return -EBADFD;
1925 if (!is_pcm_file(f.file)) {
1926 res = -EBADFD;
1927 goto _badf;
1928 }
1929 pcm_file = f.file->private_data;
1930 substream1 = pcm_file->substream;
1931 group = kmalloc(sizeof(*group), GFP_KERNEL);
1932 if (!group) {
1933 res = -ENOMEM;
1934 goto _nolock;
1935 }
1936 down_write_nonblock(&snd_pcm_link_rwsem);
1937 write_lock_irq(&snd_pcm_link_rwlock);
1938 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1939 substream->runtime->status->state != substream1->runtime->status->state ||
1940 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
1941 res = -EBADFD;
1942 goto _end;
1943 }
1944 if (snd_pcm_stream_linked(substream1)) {
1945 res = -EALREADY;
1946 goto _end;
1947 }
1948 if (!snd_pcm_stream_linked(substream)) {
1949 substream->group = group;
1950 group = NULL;
1951 spin_lock_init(&substream->group->lock);
1952 mutex_init(&substream->group->mutex);
1953 INIT_LIST_HEAD(&substream->group->substreams);
1954 list_add_tail(&substream->link_list, &substream->group->substreams);
1955 substream->group->count = 1;
1956 }
1957 list_add_tail(&substream1->link_list, &substream->group->substreams);
1958 substream->group->count++;
1959 substream1->group = substream->group;
1960 _end:
1961 write_unlock_irq(&snd_pcm_link_rwlock);
1962 up_write(&snd_pcm_link_rwsem);
1963 _nolock:
1964 snd_card_unref(substream1->pcm->card);
1965 kfree(group);
1966 _badf:
1967 fdput(f);
1968 return res;
1969 }
1970
1971 static void relink_to_local(struct snd_pcm_substream *substream)
1972 {
1973 substream->group = &substream->self_group;
1974 INIT_LIST_HEAD(&substream->self_group.substreams);
1975 list_add_tail(&substream->link_list, &substream->self_group.substreams);
1976 }
1977
1978 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1979 {
1980 struct snd_pcm_substream *s;
1981 int res = 0;
1982
1983 down_write_nonblock(&snd_pcm_link_rwsem);
1984 write_lock_irq(&snd_pcm_link_rwlock);
1985 if (!snd_pcm_stream_linked(substream)) {
1986 res = -EALREADY;
1987 goto _end;
1988 }
1989 list_del(&substream->link_list);
1990 substream->group->count--;
1991 if (substream->group->count == 1) { /* detach the last stream, too */
1992 snd_pcm_group_for_each_entry(s, substream) {
1993 relink_to_local(s);
1994 break;
1995 }
1996 kfree(substream->group);
1997 }
1998 relink_to_local(substream);
1999 _end:
2000 write_unlock_irq(&snd_pcm_link_rwlock);
2001 up_write(&snd_pcm_link_rwsem);
2002 return res;
2003 }
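
/*
 * For illustration only (not part of the kernel sources): user space builds
 * the group handled above by passing the file descriptor of another PCM
 * substream to the LINK ioctl; triggers on either handle are then applied
 * to the whole group.  fd_play and fd_cap are hypothetical, already-open
 * PCM descriptors.
 *
 *	if (ioctl(fd_play, SNDRV_PCM_IOCTL_LINK, fd_cap) < 0)
 *		perror("SNDRV_PCM_IOCTL_LINK");
 *	...
 *	ioctl(fd_play, SNDRV_PCM_IOCTL_UNLINK);
 */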
2004
2005 /*
2006 * hw configurator
2007 */
2008 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2009 struct snd_pcm_hw_rule *rule)
2010 {
2011 struct snd_interval t;
2012 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2013 hw_param_interval_c(params, rule->deps[1]), &t);
2014 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2015 }
2016
2017 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2018 struct snd_pcm_hw_rule *rule)
2019 {
2020 struct snd_interval t;
2021 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2022 hw_param_interval_c(params, rule->deps[1]), &t);
2023 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2024 }
2025
2026 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2027 struct snd_pcm_hw_rule *rule)
2028 {
2029 struct snd_interval t;
2030 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2031 hw_param_interval_c(params, rule->deps[1]),
2032 (unsigned long) rule->private, &t);
2033 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2034 }
2035
2036 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2037 struct snd_pcm_hw_rule *rule)
2038 {
2039 struct snd_interval t;
2040 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2041 (unsigned long) rule->private,
2042 hw_param_interval_c(params, rule->deps[1]), &t);
2043 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2044 }
2045
2046 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2047 struct snd_pcm_hw_rule *rule)
2048 {
2049 unsigned int k;
2050 const struct snd_interval *i =
2051 hw_param_interval_c(params, rule->deps[0]);
2052 struct snd_mask m;
2053 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2054 snd_mask_any(&m);
2055 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2056 int bits;
2057 if (! snd_mask_test(mask, k))
2058 continue;
2059 bits = snd_pcm_format_physical_width(k);
2060 if (bits <= 0)
2061 continue; /* ignore invalid formats */
2062 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2063 snd_mask_reset(&m, k);
2064 }
2065 return snd_mask_refine(mask, &m);
2066 }
2067
2068 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2069 struct snd_pcm_hw_rule *rule)
2070 {
2071 struct snd_interval t;
2072 unsigned int k;
2073 t.min = UINT_MAX;
2074 t.max = 0;
2075 t.openmin = 0;
2076 t.openmax = 0;
2077 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2078 int bits;
2079 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2080 continue;
2081 bits = snd_pcm_format_physical_width(k);
2082 if (bits <= 0)
2083 continue; /* ignore invalid formats */
2084 if (t.min > (unsigned)bits)
2085 t.min = bits;
2086 if (t.max < (unsigned)bits)
2087 t.max = bits;
2088 }
2089 t.integer = 1;
2090 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2091 }
2092
2093 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2094 #error "Change this table"
2095 #endif
2096
2097 static const unsigned int rates[] = {
2098 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2099 48000, 64000, 88200, 96000, 176400, 192000
2100 };
2101
2102 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2103 .count = ARRAY_SIZE(rates),
2104 .list = rates,
2105 };
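
/*
 * The bit positions in snd_pcm_hardware.rates index this table directly:
 * e.g. SNDRV_PCM_RATE_48000 == (1 << 7) selects rates[7] == 48000.
 * snd_pcm_hw_rule_rate() below relies on this one-to-one mapping.
 */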
2106
2107 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2108 struct snd_pcm_hw_rule *rule)
2109 {
2110 struct snd_pcm_hardware *hw = rule->private;
2111 return snd_interval_list(hw_param_interval(params, rule->var),
2112 snd_pcm_known_rates.count,
2113 snd_pcm_known_rates.list, hw->rates);
2114 }
2115
2116 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2117 struct snd_pcm_hw_rule *rule)
2118 {
2119 struct snd_interval t;
2120 struct snd_pcm_substream *substream = rule->private;
2121 t.min = 0;
2122 t.max = substream->buffer_bytes_max;
2123 t.openmin = 0;
2124 t.openmax = 0;
2125 t.integer = 1;
2126 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2127 }
2128
2129 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2130 {
2131 struct snd_pcm_runtime *runtime = substream->runtime;
2132 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2133 int k, err;
2134
2135 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2136 snd_mask_any(constrs_mask(constrs, k));
2137 }
2138
2139 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2140 snd_interval_any(constrs_interval(constrs, k));
2141 }
2142
2143 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2144 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2145 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2146 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2147 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2148
2149 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2150 snd_pcm_hw_rule_format, NULL,
2151 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2152 if (err < 0)
2153 return err;
2154 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2155 snd_pcm_hw_rule_sample_bits, NULL,
2156 SNDRV_PCM_HW_PARAM_FORMAT,
2157 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2158 if (err < 0)
2159 return err;
2160 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2161 snd_pcm_hw_rule_div, NULL,
2162 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2163 if (err < 0)
2164 return err;
2165 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2166 snd_pcm_hw_rule_mul, NULL,
2167 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2168 if (err < 0)
2169 return err;
2170 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2171 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2172 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2173 if (err < 0)
2174 return err;
2175 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2176 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2177 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2178 if (err < 0)
2179 return err;
2180 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2181 snd_pcm_hw_rule_div, NULL,
2182 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2183 if (err < 0)
2184 return err;
2185 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2186 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2187 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2188 if (err < 0)
2189 return err;
2190 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2191 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2192 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2193 if (err < 0)
2194 return err;
2195 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2196 snd_pcm_hw_rule_div, NULL,
2197 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2198 if (err < 0)
2199 return err;
2200 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2201 snd_pcm_hw_rule_div, NULL,
2202 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2203 if (err < 0)
2204 return err;
2205 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2206 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2207 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2208 if (err < 0)
2209 return err;
2210 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2211 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2212 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2213 if (err < 0)
2214 return err;
2215 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2216 snd_pcm_hw_rule_mul, NULL,
2217 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2218 if (err < 0)
2219 return err;
2220 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2221 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2222 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2223 if (err < 0)
2224 return err;
2225 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2226 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2227 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2228 if (err < 0)
2229 return err;
2230 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2231 snd_pcm_hw_rule_muldivk, (void*) 8,
2232 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2233 if (err < 0)
2234 return err;
2235 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2236 snd_pcm_hw_rule_muldivk, (void*) 8,
2237 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2238 if (err < 0)
2239 return err;
2240 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2241 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2242 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2243 if (err < 0)
2244 return err;
2245 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2246 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2247 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2248 if (err < 0)
2249 return err;
2250 return 0;
2251 }
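
/*
 * Low-level drivers extend the generic dependency graph built above with
 * their own rules, typically from their open callback.  A minimal,
 * hypothetical sketch (the foo_* names are made up; the helper calls are
 * the real API):
 *
 *	static int foo_rule_channels(struct snd_pcm_hw_params *params,
 *				     struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *		return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 *	}
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *			    foo_rule_channels, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */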
2252
2253 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2254 {
2255 struct snd_pcm_runtime *runtime = substream->runtime;
2256 struct snd_pcm_hardware *hw = &runtime->hw;
2257 int err;
2258 unsigned int mask = 0;
2259
2260 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2261 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2262 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2263 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2264 if (hw_support_mmap(substream)) {
2265 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2266 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2267 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2268 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2269 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2270 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2271 }
2272 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2273 if (err < 0)
2274 return err;
2275
2276 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2277 if (err < 0)
2278 return err;
2279
2280 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2281 if (err < 0)
2282 return err;
2283
2284 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2285 hw->channels_min, hw->channels_max);
2286 if (err < 0)
2287 return err;
2288
2289 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2290 hw->rate_min, hw->rate_max);
2291 if (err < 0)
2292 return err;
2293
2294 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2295 hw->period_bytes_min, hw->period_bytes_max);
2296 if (err < 0)
2297 return err;
2298
2299 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2300 hw->periods_min, hw->periods_max);
2301 if (err < 0)
2302 return err;
2303
2304 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2305 hw->period_bytes_min, hw->buffer_bytes_max);
2306 if (err < 0)
2307 return err;
2308
2309 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2310 snd_pcm_hw_rule_buffer_bytes_max, substream,
2311 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2312 if (err < 0)
2313 return err;
2314
2315 /* FIXME: remove */
2316 if (runtime->dma_bytes) {
2317 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2318 if (err < 0)
2319 return err;
2320 }
2321
2322 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2323 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2324 snd_pcm_hw_rule_rate, hw,
2325 SNDRV_PCM_HW_PARAM_RATE, -1);
2326 if (err < 0)
2327 return err;
2328 }
2329
2330 /* FIXME: this belongs to the lowlevel driver */
2331 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2332
2333 return 0;
2334 }
2335
2336 static void pcm_release_private(struct snd_pcm_substream *substream)
2337 {
2338 snd_pcm_unlink(substream);
2339 }
2340
2341 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2342 {
2343 substream->ref_count--;
2344 if (substream->ref_count > 0)
2345 return;
2346
2347 snd_pcm_drop(substream);
2348 if (substream->hw_opened) {
2349 if (substream->ops->hw_free &&
2350 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2351 substream->ops->hw_free(substream);
2352 substream->ops->close(substream);
2353 substream->hw_opened = 0;
2354 }
2355 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2356 pm_qos_remove_request(&substream->latency_pm_qos_req);
2357 if (substream->pcm_release) {
2358 substream->pcm_release(substream);
2359 substream->pcm_release = NULL;
2360 }
2361 snd_pcm_detach_substream(substream);
2362 }
2363 EXPORT_SYMBOL(snd_pcm_release_substream);
2364
2365 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2366 struct file *file,
2367 struct snd_pcm_substream **rsubstream)
2368 {
2369 struct snd_pcm_substream *substream;
2370 int err;
2371
2372 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2373 if (err < 0)
2374 return err;
2375 if (substream->ref_count > 1) {
2376 *rsubstream = substream;
2377 return 0;
2378 }
2379
2380 err = snd_pcm_hw_constraints_init(substream);
2381 if (err < 0) {
2382 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2383 goto error;
2384 }
2385
2386 if ((err = substream->ops->open(substream)) < 0)
2387 goto error;
2388
2389 substream->hw_opened = 1;
2390
2391 err = snd_pcm_hw_constraints_complete(substream);
2392 if (err < 0) {
2393 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2394 goto error;
2395 }
2396
2397 *rsubstream = substream;
2398 return 0;
2399
2400 error:
2401 snd_pcm_release_substream(substream);
2402 return err;
2403 }
2404 EXPORT_SYMBOL(snd_pcm_open_substream);
2405
2406 static int snd_pcm_open_file(struct file *file,
2407 struct snd_pcm *pcm,
2408 int stream)
2409 {
2410 struct snd_pcm_file *pcm_file;
2411 struct snd_pcm_substream *substream;
2412 int err;
2413
2414 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2415 if (err < 0)
2416 return err;
2417
2418 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2419 if (pcm_file == NULL) {
2420 snd_pcm_release_substream(substream);
2421 return -ENOMEM;
2422 }
2423 pcm_file->substream = substream;
2424 if (substream->ref_count == 1) {
2425 substream->file = pcm_file;
2426 substream->pcm_release = pcm_release_private;
2427 }
2428 file->private_data = pcm_file;
2429
2430 return 0;
2431 }
2432
2433 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2434 {
2435 struct snd_pcm *pcm;
2436 int err = nonseekable_open(inode, file);
2437 if (err < 0)
2438 return err;
2439 pcm = snd_lookup_minor_data(iminor(inode),
2440 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2441 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2442 if (pcm)
2443 snd_card_unref(pcm->card);
2444 return err;
2445 }
2446
2447 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2448 {
2449 struct snd_pcm *pcm;
2450 int err = nonseekable_open(inode, file);
2451 if (err < 0)
2452 return err;
2453 pcm = snd_lookup_minor_data(iminor(inode),
2454 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2455 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2456 if (pcm)
2457 snd_card_unref(pcm->card);
2458 return err;
2459 }
2460
2461 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2462 {
2463 int err;
2464 wait_queue_entry_t wait;
2465
2466 if (pcm == NULL) {
2467 err = -ENODEV;
2468 goto __error1;
2469 }
2470 err = snd_card_file_add(pcm->card, file);
2471 if (err < 0)
2472 goto __error1;
2473 if (!try_module_get(pcm->card->module)) {
2474 err = -EFAULT;
2475 goto __error2;
2476 }
2477 init_waitqueue_entry(&wait, current);
2478 add_wait_queue(&pcm->open_wait, &wait);
2479 mutex_lock(&pcm->open_mutex);
2480 while (1) {
2481 err = snd_pcm_open_file(file, pcm, stream);
2482 if (err >= 0)
2483 break;
2484 if (err == -EAGAIN) {
2485 if (file->f_flags & O_NONBLOCK) {
2486 err = -EBUSY;
2487 break;
2488 }
2489 } else
2490 break;
2491 set_current_state(TASK_INTERRUPTIBLE);
2492 mutex_unlock(&pcm->open_mutex);
2493 schedule();
2494 mutex_lock(&pcm->open_mutex);
2495 if (pcm->card->shutdown) {
2496 err = -ENODEV;
2497 break;
2498 }
2499 if (signal_pending(current)) {
2500 err = -ERESTARTSYS;
2501 break;
2502 }
2503 }
2504 remove_wait_queue(&pcm->open_wait, &wait);
2505 mutex_unlock(&pcm->open_mutex);
2506 if (err < 0)
2507 goto __error;
2508 return err;
2509
2510 __error:
2511 module_put(pcm->card->module);
2512 __error2:
2513 snd_card_file_remove(pcm->card, file);
2514 __error1:
2515 return err;
2516 }
2517
2518 static int snd_pcm_release(struct inode *inode, struct file *file)
2519 {
2520 struct snd_pcm *pcm;
2521 struct snd_pcm_substream *substream;
2522 struct snd_pcm_file *pcm_file;
2523
2524 pcm_file = file->private_data;
2525 substream = pcm_file->substream;
2526 if (snd_BUG_ON(!substream))
2527 return -ENXIO;
2528 pcm = substream->pcm;
2529 mutex_lock(&pcm->open_mutex);
2530 snd_pcm_release_substream(substream);
2531 kfree(pcm_file);
2532 mutex_unlock(&pcm->open_mutex);
2533 wake_up(&pcm->open_wait);
2534 module_put(pcm->card->module);
2535 snd_card_file_remove(pcm->card, file);
2536 return 0;
2537 }
2538
2539 /* check and update PCM state; return 0 or a negative error
2540 * call this inside the PCM stream lock
2541 */
2542 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2543 {
2544 switch (substream->runtime->status->state) {
2545 case SNDRV_PCM_STATE_DRAINING:
2546 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2547 return -EBADFD;
2548 /* Fall through */
2549 case SNDRV_PCM_STATE_RUNNING:
2550 return snd_pcm_update_hw_ptr(substream);
2551 case SNDRV_PCM_STATE_PREPARED:
2552 case SNDRV_PCM_STATE_PAUSED:
2553 return 0;
2554 case SNDRV_PCM_STATE_SUSPENDED:
2555 return -ESTRPIPE;
2556 case SNDRV_PCM_STATE_XRUN:
2557 return -EPIPE;
2558 default:
2559 return -EBADFD;
2560 }
2561 }
2562
2563 /* increase the appl_ptr; returns the processed frames or a negative error */
2564 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2565 snd_pcm_uframes_t frames,
2566 snd_pcm_sframes_t avail)
2567 {
2568 struct snd_pcm_runtime *runtime = substream->runtime;
2569 snd_pcm_sframes_t appl_ptr;
2570 int ret;
2571
2572 if (avail <= 0)
2573 return 0;
2574 if (frames > (snd_pcm_uframes_t)avail)
2575 frames = avail;
2576 appl_ptr = runtime->control->appl_ptr + frames;
2577 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2578 appl_ptr -= runtime->boundary;
2579 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2580 return ret < 0 ? ret : frames;
2581 }
2582
2583 /* decrease the appl_ptr; returns the processed frames or zero for error */
2584 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2585 snd_pcm_uframes_t frames,
2586 snd_pcm_sframes_t avail)
2587 {
2588 struct snd_pcm_runtime *runtime = substream->runtime;
2589 snd_pcm_sframes_t appl_ptr;
2590 int ret;
2591
2592 if (avail <= 0)
2593 return 0;
2594 if (frames > (snd_pcm_uframes_t)avail)
2595 frames = avail;
2596 appl_ptr = runtime->control->appl_ptr - frames;
2597 if (appl_ptr < 0)
2598 appl_ptr += runtime->boundary;
2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2600 /* NOTE: we return zero for errors because PulseAudio stops all further
2601 * processing as soon as the rewind ioctl returns an error. Returning
2602 * zero means that no rewind was done, so it's not strictly wrong to
2603 * answer like that.
2604 */
2605 return ret < 0 ? 0 : frames;
2606 }
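
/*
 * Worked example of the wrap handled above (numbers are made up, and enough
 * hw_avail is assumed so that frames is not clamped): with
 * runtime->boundary == 0x40000000 and appl_ptr == 16, rewinding by 64
 * frames gives -48, which is folded back into range as
 * 0x40000000 - 48 == 0x3fffffd0.
 */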
2607
2608 static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
2609 snd_pcm_uframes_t frames)
2610 {
2611 struct snd_pcm_runtime *runtime = substream->runtime;
2612 snd_pcm_sframes_t ret;
2613
2614 if (frames == 0)
2615 return 0;
2616
2617 snd_pcm_stream_lock_irq(substream);
2618 ret = do_pcm_hwsync(substream);
2619 if (!ret)
2620 ret = rewind_appl_ptr(substream, frames,
2621 snd_pcm_playback_hw_avail(runtime));
2622 snd_pcm_stream_unlock_irq(substream);
2623 return ret;
2624 }
2625
2626 static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
2627 snd_pcm_uframes_t frames)
2628 {
2629 struct snd_pcm_runtime *runtime = substream->runtime;
2630 snd_pcm_sframes_t ret;
2631
2632 if (frames == 0)
2633 return 0;
2634
2635 snd_pcm_stream_lock_irq(substream);
2636 ret = do_pcm_hwsync(substream);
2637 if (!ret)
2638 ret = rewind_appl_ptr(substream, frames,
2639 snd_pcm_capture_hw_avail(runtime));
2640 snd_pcm_stream_unlock_irq(substream);
2641 return ret;
2642 }
2643
2644 static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
2645 snd_pcm_uframes_t frames)
2646 {
2647 struct snd_pcm_runtime *runtime = substream->runtime;
2648 snd_pcm_sframes_t ret;
2649
2650 if (frames == 0)
2651 return 0;
2652
2653 snd_pcm_stream_lock_irq(substream);
2654 ret = do_pcm_hwsync(substream);
2655 if (!ret)
2656 ret = forward_appl_ptr(substream, frames,
2657 snd_pcm_playback_avail(runtime));
2658 snd_pcm_stream_unlock_irq(substream);
2659 return ret;
2660 }
2661
2662 static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream,
2663 snd_pcm_uframes_t frames)
2664 {
2665 struct snd_pcm_runtime *runtime = substream->runtime;
2666 snd_pcm_sframes_t ret;
2667
2668 if (frames == 0)
2669 return 0;
2670
2671 snd_pcm_stream_lock_irq(substream);
2672 ret = do_pcm_hwsync(substream);
2673 if (!ret)
2674 ret = forward_appl_ptr(substream, frames,
2675 snd_pcm_capture_avail(runtime));
2676 snd_pcm_stream_unlock_irq(substream);
2677 return ret;
2678 }
2679
2680 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2681 {
2682 int err;
2683
2684 snd_pcm_stream_lock_irq(substream);
2685 err = do_pcm_hwsync(substream);
2686 snd_pcm_stream_unlock_irq(substream);
2687 return err;
2688 }
2689
2690 static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
2691 {
2692 struct snd_pcm_runtime *runtime = substream->runtime;
2693 int err;
2694 snd_pcm_sframes_t n = 0;
2695
2696 snd_pcm_stream_lock_irq(substream);
2697 err = do_pcm_hwsync(substream);
2698 if (!err) {
2699 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2700 n = snd_pcm_playback_hw_avail(runtime);
2701 else
2702 n = snd_pcm_capture_avail(runtime);
2703 n += runtime->delay;
2704 }
2705 snd_pcm_stream_unlock_irq(substream);
2706 return err < 0 ? err : n;
2707 }
2708
2709 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2710 struct snd_pcm_sync_ptr __user *_sync_ptr)
2711 {
2712 struct snd_pcm_runtime *runtime = substream->runtime;
2713 struct snd_pcm_sync_ptr sync_ptr;
2714 volatile struct snd_pcm_mmap_status *status;
2715 volatile struct snd_pcm_mmap_control *control;
2716 int err;
2717
2718 memset(&sync_ptr, 0, sizeof(sync_ptr));
2719 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2720 return -EFAULT;
2721 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2722 return -EFAULT;
2723 status = runtime->status;
2724 control = runtime->control;
2725 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2726 err = snd_pcm_hwsync(substream);
2727 if (err < 0)
2728 return err;
2729 }
2730 snd_pcm_stream_lock_irq(substream);
2731 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2732 err = pcm_lib_apply_appl_ptr(substream,
2733 sync_ptr.c.control.appl_ptr);
2734 if (err < 0) {
2735 snd_pcm_stream_unlock_irq(substream);
2736 return err;
2737 }
2738 } else {
2739 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2740 }
2741 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2742 control->avail_min = sync_ptr.c.control.avail_min;
2743 else
2744 sync_ptr.c.control.avail_min = control->avail_min;
2745 sync_ptr.s.status.state = status->state;
2746 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2747 sync_ptr.s.status.tstamp = status->tstamp;
2748 sync_ptr.s.status.suspended_state = status->suspended_state;
2749 snd_pcm_stream_unlock_irq(substream);
2750 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2751 return -EFAULT;
2752 return 0;
2753 }
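
/*
 * Minimal user-space sketch of this ioctl (illustrative only, error handling
 * omitted): with both SNDRV_PCM_SYNC_PTR_APPL and SNDRV_PCM_SYNC_PTR_AVAIL_MIN
 * set, nothing is written to the kernel side and the call merely reports the
 * current pointers back; SNDRV_PCM_SYNC_PTR_HWSYNC forces a hw_ptr update
 * first.
 *
 *	struct snd_pcm_sync_ptr sp = { 0 };
 *	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC |
 *		   SNDRV_PCM_SYNC_PTR_APPL | SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
 *	ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);
 *	(then sp.s.status.hw_ptr and sp.c.control.appl_ptr are current)
 */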
2754
2755 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2756 {
2757 struct snd_pcm_runtime *runtime = substream->runtime;
2758 int arg;
2759
2760 if (get_user(arg, _arg))
2761 return -EFAULT;
2762 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2763 return -EINVAL;
2764 runtime->tstamp_type = arg;
2765 return 0;
2766 }
2767
2768 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2769 struct snd_xferi __user *_xferi)
2770 {
2771 struct snd_xferi xferi;
2772 struct snd_pcm_runtime *runtime = substream->runtime;
2773 snd_pcm_sframes_t result;
2774
2775 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2776 return -EBADFD;
2777 if (put_user(0, &_xferi->result))
2778 return -EFAULT;
2779 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2780 return -EFAULT;
2781 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2782 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2783 else
2784 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2785 __put_user(result, &_xferi->result);
2786 return result < 0 ? result : 0;
2787 }
2788
2789 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2790 struct snd_xfern __user *_xfern)
2791 {
2792 struct snd_xfern xfern;
2793 struct snd_pcm_runtime *runtime = substream->runtime;
2794 void *bufs;
2795 snd_pcm_sframes_t result;
2796
2797 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2798 return -EBADFD;
2799 if (runtime->channels > 128)
2800 return -EINVAL;
2801 if (put_user(0, &_xfern->result))
2802 return -EFAULT;
2803 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2804 return -EFAULT;
2805
2806 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2807 if (IS_ERR(bufs))
2808 return PTR_ERR(bufs);
2809 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2810 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2811 else
2812 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2813 kfree(bufs);
2814 __put_user(result, &_xfern->result);
2815 return result < 0 ? result : 0;
2816 }
2817
2818 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2819 snd_pcm_uframes_t __user *_frames)
2820 {
2821 snd_pcm_uframes_t frames;
2822 snd_pcm_sframes_t result;
2823
2824 if (get_user(frames, _frames))
2825 return -EFAULT;
2826 if (put_user(0, _frames))
2827 return -EFAULT;
2828 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2829 result = snd_pcm_playback_rewind(substream, frames);
2830 else
2831 result = snd_pcm_capture_rewind(substream, frames);
2832 __put_user(result, _frames);
2833 return result < 0 ? result : 0;
2834 }
2835
2836 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2837 snd_pcm_uframes_t __user *_frames)
2838 {
2839 snd_pcm_uframes_t frames;
2840 snd_pcm_sframes_t result;
2841
2842 if (get_user(frames, _frames))
2843 return -EFAULT;
2844 if (put_user(0, _frames))
2845 return -EFAULT;
2846 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2847 result = snd_pcm_playback_forward(substream, frames);
2848 else
2849 result = snd_pcm_capture_forward(substream, frames);
2850 __put_user(result, _frames);
2851 return result < 0 ? result : 0;
2852 }
2853
2854 static int snd_pcm_common_ioctl(struct file *file,
2855 struct snd_pcm_substream *substream,
2856 unsigned int cmd, void __user *arg)
2857 {
2858 struct snd_pcm_file *pcm_file = file->private_data;
2859 int res;
2860
2861 if (PCM_RUNTIME_CHECK(substream))
2862 return -ENXIO;
2863
2864 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2865 if (res < 0)
2866 return res;
2867
2868 switch (cmd) {
2869 case SNDRV_PCM_IOCTL_PVERSION:
2870 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2871 case SNDRV_PCM_IOCTL_INFO:
2872 return snd_pcm_info_user(substream, arg);
2873 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2874 return 0;
2875 case SNDRV_PCM_IOCTL_TTSTAMP:
2876 return snd_pcm_tstamp(substream, arg);
2877 case SNDRV_PCM_IOCTL_USER_PVERSION:
2878 if (get_user(pcm_file->user_pversion,
2879 (unsigned int __user *)arg))
2880 return -EFAULT;
2881 return 0;
2882 case SNDRV_PCM_IOCTL_HW_REFINE:
2883 return snd_pcm_hw_refine_user(substream, arg);
2884 case SNDRV_PCM_IOCTL_HW_PARAMS:
2885 return snd_pcm_hw_params_user(substream, arg);
2886 case SNDRV_PCM_IOCTL_HW_FREE:
2887 return snd_pcm_hw_free(substream);
2888 case SNDRV_PCM_IOCTL_SW_PARAMS:
2889 return snd_pcm_sw_params_user(substream, arg);
2890 case SNDRV_PCM_IOCTL_STATUS:
2891 return snd_pcm_status_user(substream, arg, false);
2892 case SNDRV_PCM_IOCTL_STATUS_EXT:
2893 return snd_pcm_status_user(substream, arg, true);
2894 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2895 return snd_pcm_channel_info_user(substream, arg);
2896 case SNDRV_PCM_IOCTL_PREPARE:
2897 return snd_pcm_prepare(substream, file);
2898 case SNDRV_PCM_IOCTL_RESET:
2899 return snd_pcm_reset(substream);
2900 case SNDRV_PCM_IOCTL_START:
2901 return snd_pcm_start_lock_irq(substream);
2902 case SNDRV_PCM_IOCTL_LINK:
2903 return snd_pcm_link(substream, (int)(unsigned long) arg);
2904 case SNDRV_PCM_IOCTL_UNLINK:
2905 return snd_pcm_unlink(substream);
2906 case SNDRV_PCM_IOCTL_RESUME:
2907 return snd_pcm_resume(substream);
2908 case SNDRV_PCM_IOCTL_XRUN:
2909 return snd_pcm_xrun(substream);
2910 case SNDRV_PCM_IOCTL_HWSYNC:
2911 return snd_pcm_hwsync(substream);
2912 case SNDRV_PCM_IOCTL_DELAY:
2913 {
2914 snd_pcm_sframes_t delay = snd_pcm_delay(substream);
2915 snd_pcm_sframes_t __user *res = arg;
2916
2917 if (delay < 0)
2918 return delay;
2919 if (put_user(delay, res))
2920 return -EFAULT;
2921 return 0;
2922 }
2923 case SNDRV_PCM_IOCTL_SYNC_PTR:
2924 return snd_pcm_sync_ptr(substream, arg);
2925 #ifdef CONFIG_SND_SUPPORT_OLD_API
2926 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2927 return snd_pcm_hw_refine_old_user(substream, arg);
2928 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2929 return snd_pcm_hw_params_old_user(substream, arg);
2930 #endif
2931 case SNDRV_PCM_IOCTL_DRAIN:
2932 return snd_pcm_drain(substream, file);
2933 case SNDRV_PCM_IOCTL_DROP:
2934 return snd_pcm_drop(substream);
2935 case SNDRV_PCM_IOCTL_PAUSE:
2936 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2937 substream,
2938 (int)(unsigned long)arg);
2939 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2940 case SNDRV_PCM_IOCTL_READI_FRAMES:
2941 return snd_pcm_xferi_frames_ioctl(substream, arg);
2942 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2943 case SNDRV_PCM_IOCTL_READN_FRAMES:
2944 return snd_pcm_xfern_frames_ioctl(substream, arg);
2945 case SNDRV_PCM_IOCTL_REWIND:
2946 return snd_pcm_rewind_ioctl(substream, arg);
2947 case SNDRV_PCM_IOCTL_FORWARD:
2948 return snd_pcm_forward_ioctl(substream, arg);
2949 }
2950 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2951 return -ENOTTY;
2952 }
2953
2954 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
2955 unsigned long arg)
2956 {
2957 struct snd_pcm_file *pcm_file;
2958
2959 pcm_file = file->private_data;
2960
2961 if (((cmd >> 8) & 0xff) != 'A')
2962 return -ENOTTY;
2963
2964 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
2965 (void __user *)arg);
2966 }
2967
2968 /**
2969 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
2970 * @substream: PCM substream
2971 * @cmd: IOCTL cmd
2972 * @arg: IOCTL argument
2973 *
2974 * This function is provided primarily for the OSS layer and USB gadget drivers,
2975 * and it allows only a limited set of ioctls (hw_params, sw_params,
2976 * prepare, start, drain, drop, forward, delay).
2977 */
2978 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
2979 unsigned int cmd, void *arg)
2980 {
2981 snd_pcm_uframes_t *frames = arg;
2982 snd_pcm_sframes_t result;
2983
2984 switch (cmd) {
2985 case SNDRV_PCM_IOCTL_FORWARD:
2986 {
2987 /* provided only for OSS; capture-only and no value returned */
2988 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
2989 return -EINVAL;
2990 result = snd_pcm_capture_forward(substream, *frames);
2991 return result < 0 ? result : 0;
2992 }
2993 case SNDRV_PCM_IOCTL_HW_PARAMS:
2994 return snd_pcm_hw_params(substream, arg);
2995 case SNDRV_PCM_IOCTL_SW_PARAMS:
2996 return snd_pcm_sw_params(substream, arg);
2997 case SNDRV_PCM_IOCTL_PREPARE:
2998 return snd_pcm_prepare(substream, NULL);
2999 case SNDRV_PCM_IOCTL_START:
3000 return snd_pcm_start_lock_irq(substream);
3001 case SNDRV_PCM_IOCTL_DRAIN:
3002 return snd_pcm_drain(substream, NULL);
3003 case SNDRV_PCM_IOCTL_DROP:
3004 return snd_pcm_drop(substream);
3005 case SNDRV_PCM_IOCTL_DELAY:
3006 {
3007 result = snd_pcm_delay(substream);
3008 if (result < 0)
3009 return result;
3010 *frames = result;
3011 return 0;
3012 }
3013 default:
3014 return -EINVAL;
3015 }
3016 }
3017 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
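
/*
 * Illustrative in-kernel caller (a sketch; it assumes "substream" is a valid,
 * already-opened substream, as in the OSS emulation layer):
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
 *	if (err < 0)
 *		return err;
 */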
3018
3019 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3020 loff_t * offset)
3021 {
3022 struct snd_pcm_file *pcm_file;
3023 struct snd_pcm_substream *substream;
3024 struct snd_pcm_runtime *runtime;
3025 snd_pcm_sframes_t result;
3026
3027 pcm_file = file->private_data;
3028 substream = pcm_file->substream;
3029 if (PCM_RUNTIME_CHECK(substream))
3030 return -ENXIO;
3031 runtime = substream->runtime;
3032 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3033 return -EBADFD;
3034 if (!frame_aligned(runtime, count))
3035 return -EINVAL;
3036 count = bytes_to_frames(runtime, count);
3037 result = snd_pcm_lib_read(substream, buf, count);
3038 if (result > 0)
3039 result = frames_to_bytes(runtime, result);
3040 return result;
3041 }
3042
3043 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3044 size_t count, loff_t * offset)
3045 {
3046 struct snd_pcm_file *pcm_file;
3047 struct snd_pcm_substream *substream;
3048 struct snd_pcm_runtime *runtime;
3049 snd_pcm_sframes_t result;
3050
3051 pcm_file = file->private_data;
3052 substream = pcm_file->substream;
3053 if (PCM_RUNTIME_CHECK(substream))
3054 return -ENXIO;
3055 runtime = substream->runtime;
3056 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3057 return -EBADFD;
3058 if (!frame_aligned(runtime, count))
3059 return -EINVAL;
3060 count = bytes_to_frames(runtime, count);
3061 result = snd_pcm_lib_write(substream, buf, count);
3062 if (result > 0)
3063 result = frames_to_bytes(runtime, result);
3064 return result;
3065 }
3066
3067 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3068 {
3069 struct snd_pcm_file *pcm_file;
3070 struct snd_pcm_substream *substream;
3071 struct snd_pcm_runtime *runtime;
3072 snd_pcm_sframes_t result;
3073 unsigned long i;
3074 void __user **bufs;
3075 snd_pcm_uframes_t frames;
3076
3077 pcm_file = iocb->ki_filp->private_data;
3078 substream = pcm_file->substream;
3079 if (PCM_RUNTIME_CHECK(substream))
3080 return -ENXIO;
3081 runtime = substream->runtime;
3082 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3083 return -EBADFD;
3084 if (!iter_is_iovec(to))
3085 return -EINVAL;
3086 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3087 return -EINVAL;
3088 if (!frame_aligned(runtime, to->iov->iov_len))
3089 return -EINVAL;
3090 frames = bytes_to_samples(runtime, to->iov->iov_len);
3091 bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
3092 if (bufs == NULL)
3093 return -ENOMEM;
3094 for (i = 0; i < to->nr_segs; ++i)
3095 bufs[i] = to->iov[i].iov_base;
3096 result = snd_pcm_lib_readv(substream, bufs, frames);
3097 if (result > 0)
3098 result = frames_to_bytes(runtime, result);
3099 kfree(bufs);
3100 return result;
3101 }
3102
3103 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3104 {
3105 struct snd_pcm_file *pcm_file;
3106 struct snd_pcm_substream *substream;
3107 struct snd_pcm_runtime *runtime;
3108 snd_pcm_sframes_t result;
3109 unsigned long i;
3110 void __user **bufs;
3111 snd_pcm_uframes_t frames;
3112
3113 pcm_file = iocb->ki_filp->private_data;
3114 substream = pcm_file->substream;
3115 if (PCM_RUNTIME_CHECK(substream))
3116 return -ENXIO;
3117 runtime = substream->runtime;
3118 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3119 return -EBADFD;
3120 if (!iter_is_iovec(from))
3121 return -EINVAL;
3122 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3123 !frame_aligned(runtime, from->iov->iov_len))
3124 return -EINVAL;
3125 frames = bytes_to_samples(runtime, from->iov->iov_len);
3126 bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
3127 if (bufs == NULL)
3128 return -ENOMEM;
3129 for (i = 0; i < from->nr_segs; ++i)
3130 bufs[i] = from->iov[i].iov_base;
3131 result = snd_pcm_lib_writev(substream, bufs, frames);
3132 if (result > 0)
3133 result = frames_to_bytes(runtime, result);
3134 kfree(bufs);
3135 return result;
3136 }
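
/*
 * Matching user-space sketch (illustrative only): with
 * SNDRV_PCM_ACCESS_RW_NONINTERLEAVED access, one iovec per channel is passed
 * and the per-channel chunks are expected to be of equal, frame-aligned
 * length (as checked above); a writev() on the PCM device file ends up in
 * snd_pcm_writev().  "left", "right" and "chunk_bytes" are placeholders.
 *
 *	struct iovec vec[2] = {
 *		{ .iov_base = left,  .iov_len = chunk_bytes },
 *		{ .iov_base = right, .iov_len = chunk_bytes },
 *	};
 *	writev(fd, vec, 2);
 */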
3137
3138 static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
3139 {
3140 struct snd_pcm_file *pcm_file;
3141 struct snd_pcm_substream *substream;
3142 struct snd_pcm_runtime *runtime;
3143 unsigned int mask;
3144 snd_pcm_uframes_t avail;
3145
3146 pcm_file = file->private_data;
3147
3148 substream = pcm_file->substream;
3149 if (PCM_RUNTIME_CHECK(substream))
3150 return POLLOUT | POLLWRNORM | POLLERR;
3151 runtime = substream->runtime;
3152
3153 poll_wait(file, &runtime->sleep, wait);
3154
3155 snd_pcm_stream_lock_irq(substream);
3156 avail = snd_pcm_playback_avail(runtime);
3157 switch (runtime->status->state) {
3158 case SNDRV_PCM_STATE_RUNNING:
3159 case SNDRV_PCM_STATE_PREPARED:
3160 case SNDRV_PCM_STATE_PAUSED:
3161 if (avail >= runtime->control->avail_min) {
3162 mask = POLLOUT | POLLWRNORM;
3163 break;
3164 }
3165 /* Fall through */
3166 case SNDRV_PCM_STATE_DRAINING:
3167 mask = 0;
3168 break;
3169 default:
3170 mask = POLLOUT | POLLWRNORM | POLLERR;
3171 break;
3172 }
3173 snd_pcm_stream_unlock_irq(substream);
3174 return mask;
3175 }
3176
3177 static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
3178 {
3179 struct snd_pcm_file *pcm_file;
3180 struct snd_pcm_substream *substream;
3181 struct snd_pcm_runtime *runtime;
3182 unsigned int mask;
3183 snd_pcm_uframes_t avail;
3184
3185 pcm_file = file->private_data;
3186
3187 substream = pcm_file->substream;
3188 if (PCM_RUNTIME_CHECK(substream))
3189 return POLLIN | POLLRDNORM | POLLERR;
3190 runtime = substream->runtime;
3191
3192 poll_wait(file, &runtime->sleep, wait);
3193
3194 snd_pcm_stream_lock_irq(substream);
3195 avail = snd_pcm_capture_avail(runtime);
3196 switch (runtime->status->state) {
3197 case SNDRV_PCM_STATE_RUNNING:
3198 case SNDRV_PCM_STATE_PREPARED:
3199 case SNDRV_PCM_STATE_PAUSED:
3200 if (avail >= runtime->control->avail_min) {
3201 mask = POLLIN | POLLRDNORM;
3202 break;
3203 }
3204 mask = 0;
3205 break;
3206 case SNDRV_PCM_STATE_DRAINING:
3207 if (avail > 0) {
3208 mask = POLLIN | POLLRDNORM;
3209 break;
3210 }
3211 /* Fall through */
3212 default:
3213 mask = POLLIN | POLLRDNORM | POLLERR;
3214 break;
3215 }
3216 snd_pcm_stream_unlock_irq(substream);
3217 return mask;
3218 }
3219
3220 /*
3221 * mmap support
3222 */
3223
3224 /*
3225 * Only on coherent architectures can we mmap the status and the control records
3226 * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3227 */
3228 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3229 /*
3230 * mmap status record
3231 */
3232 static int snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3233 {
3234 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3235 struct snd_pcm_runtime *runtime;
3236
3237 if (substream == NULL)
3238 return VM_FAULT_SIGBUS;
3239 runtime = substream->runtime;
3240 vmf->page = virt_to_page(runtime->status);
3241 get_page(vmf->page);
3242 return 0;
3243 }
3244
3245 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3246 {
3247 .fault = snd_pcm_mmap_status_fault,
3248 };
3249
3250 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3251 struct vm_area_struct *area)
3252 {
3253 long size;
3254 if (!(area->vm_flags & VM_READ))
3255 return -EINVAL;
3256 size = area->vm_end - area->vm_start;
3257 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3258 return -EINVAL;
3259 area->vm_ops = &snd_pcm_vm_ops_status;
3260 area->vm_private_data = substream;
3261 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3262 return 0;
3263 }
3264
3265 /*
3266 * mmap control record
3267 */
3268 static int snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3269 {
3270 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3271 struct snd_pcm_runtime *runtime;
3272
3273 if (substream == NULL)
3274 return VM_FAULT_SIGBUS;
3275 runtime = substream->runtime;
3276 vmf->page = virt_to_page(runtime->control);
3277 get_page(vmf->page);
3278 return 0;
3279 }
3280
3281 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3282 {
3283 .fault = snd_pcm_mmap_control_fault,
3284 };
3285
3286 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3287 struct vm_area_struct *area)
3288 {
3289 long size;
3290 if (!(area->vm_flags & VM_READ))
3291 return -EINVAL;
3292 size = area->vm_end - area->vm_start;
3293 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3294 return -EINVAL;
3295 area->vm_ops = &snd_pcm_vm_ops_control;
3296 area->vm_private_data = substream;
3297 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3298 return 0;
3299 }
3300
3301 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3302 {
3303 if (pcm_file->no_compat_mmap)
3304 return false;
3305 /* See pcm_control_mmap_allowed() below.
3306 * Since older alsa-lib requires both status and control mmaps to be
3307 * coupled, we have to disable the status mmap for old alsa-lib, too.
3308 */
3309 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3310 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3311 return false;
3312 return true;
3313 }
3314
3315 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3316 {
3317 if (pcm_file->no_compat_mmap)
3318 return false;
3319 /* Disallow the control mmap when SYNC_APPLPTR flag is set;
3320 * this forces user-space to fall back to snd_pcm_sync_ptr(),
3321 * which effectively assures the manual update of appl_ptr.
3322 */
3323 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3324 return false;
3325 return true;
3326 }
3327
3328 #else /* ! coherent mmap */
3329 /*
3330 * don't support mmap for status and control records.
3331 */
3332 #define pcm_status_mmap_allowed(pcm_file) false
3333 #define pcm_control_mmap_allowed(pcm_file) false
3334
3335 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3336 struct vm_area_struct *area)
3337 {
3338 return -ENXIO;
3339 }
3340 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3341 struct vm_area_struct *area)
3342 {
3343 return -ENXIO;
3344 }
3345 #endif /* coherent mmap */
3346
3347 static inline struct page *
3348 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3349 {
3350 void *vaddr = substream->runtime->dma_area + ofs;
3351 return virt_to_page(vaddr);
3352 }
3353
3354 /*
3355 * fault callback for mmapping a RAM page
3356 */
3357 static int snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3358 {
3359 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3360 struct snd_pcm_runtime *runtime;
3361 unsigned long offset;
3362 struct page * page;
3363 size_t dma_bytes;
3364
3365 if (substream == NULL)
3366 return VM_FAULT_SIGBUS;
3367 runtime = substream->runtime;
3368 offset = vmf->pgoff << PAGE_SHIFT;
3369 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3370 if (offset > dma_bytes - PAGE_SIZE)
3371 return VM_FAULT_SIGBUS;
3372 if (substream->ops->page)
3373 page = substream->ops->page(substream, offset);
3374 else
3375 page = snd_pcm_default_page_ops(substream, offset);
3376 if (!page)
3377 return VM_FAULT_SIGBUS;
3378 get_page(page);
3379 vmf->page = page;
3380 return 0;
3381 }
3382
3383 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3384 .open = snd_pcm_mmap_data_open,
3385 .close = snd_pcm_mmap_data_close,
3386 };
3387
3388 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3389 .open = snd_pcm_mmap_data_open,
3390 .close = snd_pcm_mmap_data_close,
3391 .fault = snd_pcm_mmap_data_fault,
3392 };
3393
3394 /*
3395 * mmap the DMA buffer on RAM
3396 */
3397
3398 /**
3399 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3400 * @substream: PCM substream
3401 * @area: VMA
3402 *
3403 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3404 * this function is invoked implicitly.
3405 */
3406 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3407 struct vm_area_struct *area)
3408 {
3409 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3410 #ifdef CONFIG_GENERIC_ALLOCATOR
3411 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3412 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3413 return remap_pfn_range(area, area->vm_start,
3414 substream->dma_buffer.addr >> PAGE_SHIFT,
3415 area->vm_end - area->vm_start, area->vm_page_prot);
3416 }
3417 #endif /* CONFIG_GENERIC_ALLOCATOR */
3418 #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
3419 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3420 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3421 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3422 area,
3423 substream->runtime->dma_area,
3424 substream->runtime->dma_addr,
3425 area->vm_end - area->vm_start);
3426 #endif /* CONFIG_X86 */
3427 /* mmap with fault handler */
3428 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3429 return 0;
3430 }
3431 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
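
/*
 * Sketch of a driver opting into this handler explicitly (leaving the mmap
 * field NULL has the same effect, as noted above); the ops structure name
 * is hypothetical:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_default_mmap,
 *	};
 */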
3432
3433 /*
3434 * mmap the DMA buffer on I/O memory area
3435 */
3436 #if SNDRV_PCM_INFO_MMAP_IOMEM
3437 /**
3438 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3439 * @substream: PCM substream
3440 * @area: VMA
3441 *
3442 * When your hardware uses the iomapped pages as the hardware buffer and
3443 * wants to mmap it, pass this function as mmap pcm_ops. Note that this
3444 * is supposed to work only on limited architectures.
3445 */
3446 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3447 struct vm_area_struct *area)
3448 {
3449 struct snd_pcm_runtime *runtime = substream->runtime;
3450
3451 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3452 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3453 }
3454 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
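
/*
 * Hedged usage example: a driver whose ring buffer lives in device I/O
 * memory advertises SNDRV_PCM_INFO_MMAP_IOMEM in its snd_pcm_hardware.info
 * mask and plugs this helper into its ops (the field values below are only
 * a sketch):
 *
 *	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_IOMEM | ...,
 *	...
 *	.mmap = snd_pcm_lib_mmap_iomem,
 */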
3455 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3456
3457 /*
3458 * mmap DMA buffer
3459 */
3460 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3461 struct vm_area_struct *area)
3462 {
3463 struct snd_pcm_runtime *runtime;
3464 long size;
3465 unsigned long offset;
3466 size_t dma_bytes;
3467 int err;
3468
3469 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3470 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3471 return -EINVAL;
3472 } else {
3473 if (!(area->vm_flags & VM_READ))
3474 return -EINVAL;
3475 }
3476 runtime = substream->runtime;
3477 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3478 return -EBADFD;
3479 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3480 return -ENXIO;
3481 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3482 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3483 return -EINVAL;
3484 size = area->vm_end - area->vm_start;
3485 offset = area->vm_pgoff << PAGE_SHIFT;
3486 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3487 if ((size_t)size > dma_bytes)
3488 return -EINVAL;
3489 if (offset > dma_bytes - size)
3490 return -EINVAL;
3491
3492 area->vm_ops = &snd_pcm_vm_ops_data;
3493 area->vm_private_data = substream;
3494 if (substream->ops->mmap)
3495 err = substream->ops->mmap(substream, area);
3496 else
3497 err = snd_pcm_lib_default_mmap(substream, area);
3498 if (!err)
3499 atomic_inc(&substream->mmap_count);
3500 return err;
3501 }
3502 EXPORT_SYMBOL(snd_pcm_mmap_data);
3503
3504 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3505 {
3506 struct snd_pcm_file * pcm_file;
3507 struct snd_pcm_substream *substream;
3508 unsigned long offset;
3509
3510 pcm_file = file->private_data;
3511 substream = pcm_file->substream;
3512 if (PCM_RUNTIME_CHECK(substream))
3513 return -ENXIO;
3514
3515 offset = area->vm_pgoff << PAGE_SHIFT;
3516 switch (offset) {
3517 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3518 if (!pcm_status_mmap_allowed(pcm_file))
3519 return -ENXIO;
3520 return snd_pcm_mmap_status(substream, file, area);
3521 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3522 if (!pcm_control_mmap_allowed(pcm_file))
3523 return -ENXIO;
3524 return snd_pcm_mmap_control(substream, file, area);
3525 default:
3526 return snd_pcm_mmap_data(substream, file, area);
3527 }
3528 return 0;
3529 }
3530
3531 static int snd_pcm_fasync(int fd, struct file * file, int on)
3532 {
3533 struct snd_pcm_file * pcm_file;
3534 struct snd_pcm_substream *substream;
3535 struct snd_pcm_runtime *runtime;
3536
3537 pcm_file = file->private_data;
3538 substream = pcm_file->substream;
3539 if (PCM_RUNTIME_CHECK(substream))
3540 return -ENXIO;
3541 runtime = substream->runtime;
3542 return fasync_helper(fd, file, on, &runtime->fasync);
3543 }
3544
3545 /*
3546 * ioctl32 compat
3547 */
3548 #ifdef CONFIG_COMPAT
3549 #include "pcm_compat.c"
3550 #else
3551 #define snd_pcm_ioctl_compat NULL
3552 #endif
3553
3554 /*
3555 * To be removed helpers to keep binary compatibility
3556 */
3557
3558 #ifdef CONFIG_SND_SUPPORT_OLD_API
3559 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3560 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
3561
3562 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3563 struct snd_pcm_hw_params_old *oparams)
3564 {
3565 unsigned int i;
3566
3567 memset(params, 0, sizeof(*params));
3568 params->flags = oparams->flags;
3569 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3570 params->masks[i].bits[0] = oparams->masks[i];
3571 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3572 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3573 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3574 params->info = oparams->info;
3575 params->msbits = oparams->msbits;
3576 params->rate_num = oparams->rate_num;
3577 params->rate_den = oparams->rate_den;
3578 params->fifo_size = oparams->fifo_size;
3579 }
3580
3581 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3582 struct snd_pcm_hw_params *params)
3583 {
3584 unsigned int i;
3585
3586 memset(oparams, 0, sizeof(*oparams));
3587 oparams->flags = params->flags;
3588 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3589 oparams->masks[i] = params->masks[i].bits[0];
3590 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3591 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3592 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3593 oparams->info = params->info;
3594 oparams->msbits = params->msbits;
3595 oparams->rate_num = params->rate_num;
3596 oparams->rate_den = params->rate_den;
3597 oparams->fifo_size = params->fifo_size;
3598 }
3599
3600 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3601 struct snd_pcm_hw_params_old __user * _oparams)
3602 {
3603 struct snd_pcm_hw_params *params;
3604 struct snd_pcm_hw_params_old *oparams = NULL;
3605 int err;
3606
3607 params = kmalloc(sizeof(*params), GFP_KERNEL);
3608 if (!params)
3609 return -ENOMEM;
3610
3611 oparams = memdup_user(_oparams, sizeof(*oparams));
3612 if (IS_ERR(oparams)) {
3613 err = PTR_ERR(oparams);
3614 goto out;
3615 }
3616 snd_pcm_hw_convert_from_old_params(params, oparams);
3617 err = snd_pcm_hw_refine(substream, params);
3618 if (err < 0)
3619 goto out_old;
3620
3621 err = fixup_unreferenced_params(substream, params);
3622 if (err < 0)
3623 goto out_old;
3624
3625 snd_pcm_hw_convert_to_old_params(oparams, params);
3626 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3627 err = -EFAULT;
3628 out_old:
3629 kfree(oparams);
3630 out:
3631 kfree(params);
3632 return err;
3633 }
3634
3635 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3636 struct snd_pcm_hw_params_old __user * _oparams)
3637 {
3638 struct snd_pcm_hw_params *params;
3639 struct snd_pcm_hw_params_old *oparams = NULL;
3640 int err;
3641
3642 params = kmalloc(sizeof(*params), GFP_KERNEL);
3643 if (!params)
3644 return -ENOMEM;
3645
3646 oparams = memdup_user(_oparams, sizeof(*oparams));
3647 if (IS_ERR(oparams)) {
3648 err = PTR_ERR(oparams);
3649 goto out;
3650 }
3651
3652 snd_pcm_hw_convert_from_old_params(params, oparams);
3653 err = snd_pcm_hw_params(substream, params);
3654 if (err < 0)
3655 goto out_old;
3656
3657 snd_pcm_hw_convert_to_old_params(oparams, params);
3658 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3659 err = -EFAULT;
3660 out_old:
3661 kfree(oparams);
3662 out:
3663 kfree(params);
3664 return err;
3665 }
3666 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3667
3668 #ifndef CONFIG_MMU
3669 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3670 unsigned long addr,
3671 unsigned long len,
3672 unsigned long pgoff,
3673 unsigned long flags)
3674 {
3675 struct snd_pcm_file *pcm_file = file->private_data;
3676 struct snd_pcm_substream *substream = pcm_file->substream;
3677 struct snd_pcm_runtime *runtime = substream->runtime;
3678 unsigned long offset = pgoff << PAGE_SHIFT;
3679
3680 switch (offset) {
3681 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3682 return (unsigned long)runtime->status;
3683 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3684 return (unsigned long)runtime->control;
3685 default:
3686 return (unsigned long)runtime->dma_area + offset;
3687 }
3688 }
3689 #else
3690 # define snd_pcm_get_unmapped_area NULL
3691 #endif
3692
3693 /*
3694 * Register section
3695 */
3696
3697 const struct file_operations snd_pcm_f_ops[2] = {
3698 {
3699 .owner = THIS_MODULE,
3700 .write = snd_pcm_write,
3701 .write_iter = snd_pcm_writev,
3702 .open = snd_pcm_playback_open,
3703 .release = snd_pcm_release,
3704 .llseek = no_llseek,
3705 .poll = snd_pcm_playback_poll,
3706 .unlocked_ioctl = snd_pcm_ioctl,
3707 .compat_ioctl = snd_pcm_ioctl_compat,
3708 .mmap = snd_pcm_mmap,
3709 .fasync = snd_pcm_fasync,
3710 .get_unmapped_area = snd_pcm_get_unmapped_area,
3711 },
3712 {
3713 .owner = THIS_MODULE,
3714 .read = snd_pcm_read,
3715 .read_iter = snd_pcm_readv,
3716 .open = snd_pcm_capture_open,
3717 .release = snd_pcm_release,
3718 .llseek = no_llseek,
3719 .poll = snd_pcm_capture_poll,
3720 .unlocked_ioctl = snd_pcm_ioctl,
3721 .compat_ioctl = snd_pcm_ioctl_compat,
3722 .mmap = snd_pcm_mmap,
3723 .fasync = snd_pcm_fasync,
3724 .get_unmapped_area = snd_pcm_get_unmapped_area,
3725 }
3726 };