sound/core/pcm_native.c
1 /*
2 * Digital Audio (PCM) abstract layer
3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/file.h>
25 #include <linux/slab.h>
26 #include <linux/sched/signal.h>
27 #include <linux/time.h>
28 #include <linux/pm_qos.h>
29 #include <linux/io.h>
30 #include <linux/dma-mapping.h>
31 #include <sound/core.h>
32 #include <sound/control.h>
33 #include <sound/info.h>
34 #include <sound/pcm.h>
35 #include <sound/pcm_params.h>
36 #include <sound/timer.h>
37 #include <sound/minors.h>
38 #include <linux/uio.h>
39
40 #include "pcm_local.h"
41
42 #ifdef CONFIG_SND_DEBUG
43 #define CREATE_TRACE_POINTS
44 #include "pcm_param_trace.h"
45 #else
46 #define trace_hw_mask_param_enabled() 0
47 #define trace_hw_interval_param_enabled() 0
48 #define trace_hw_mask_param(substream, type, index, prev, curr)
49 #define trace_hw_interval_param(substream, type, index, prev, curr)
50 #endif
51
52 /*
53 * Compatibility
54 */
55
56 struct snd_pcm_hw_params_old {
57 unsigned int flags;
58 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
59 SNDRV_PCM_HW_PARAM_ACCESS + 1];
60 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
61 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
62 unsigned int rmask;
63 unsigned int cmask;
64 unsigned int info;
65 unsigned int msbits;
66 unsigned int rate_num;
67 unsigned int rate_den;
68 snd_pcm_uframes_t fifo_size;
69 unsigned char reserved[64];
70 };
71
72 #ifdef CONFIG_SND_SUPPORT_OLD_API
73 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
74 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
75
76 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
77 struct snd_pcm_hw_params_old __user * _oparams);
78 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
79 struct snd_pcm_hw_params_old __user * _oparams);
80 #endif
81 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
82
83 /*
84 *
85 */
86
87 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
88 static DECLARE_RWSEM(snd_pcm_link_rwsem);
89
90 /* A writer in the rwsem may block readers even while it is waiting in
91  * the queue, and this may lead to a deadlock when a code path takes the
92  * read sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
93  * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
94  * spin until it gets the lock.
95  */
96 static inline void down_write_nonblock(struct rw_semaphore *lock)
97 {
98 while (!down_write_trylock(lock))
99 cond_resched();
100 }
101
102 /**
103 * snd_pcm_stream_lock - Lock the PCM stream
104 * @substream: PCM substream
105 *
106 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
107  * flag of the given substream. It also takes the global link rw lock
108  * (or rw sem) to avoid races with linked streams.
109 */
110 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
111 {
112 if (substream->pcm->nonatomic) {
113 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
114 mutex_lock(&substream->self_group.mutex);
115 } else {
116 read_lock(&snd_pcm_link_rwlock);
117 spin_lock(&substream->self_group.lock);
118 }
119 }
120 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
121
122 /**
123  * snd_pcm_stream_unlock - Unlock the PCM stream
124 * @substream: PCM substream
125 *
126 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
127 */
128 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
129 {
130 if (substream->pcm->nonatomic) {
131 mutex_unlock(&substream->self_group.mutex);
132 up_read(&snd_pcm_link_rwsem);
133 } else {
134 spin_unlock(&substream->self_group.lock);
135 read_unlock(&snd_pcm_link_rwlock);
136 }
137 }
138 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
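/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller pairs the two helpers above around an access to the runtime
 * state, e.g. in a hypothetical my_peek_state() helper:
 *
 *	static snd_pcm_state_t my_peek_state(struct snd_pcm_substream *substream)
 *	{
 *		snd_pcm_state_t state;
 *
 *		snd_pcm_stream_lock(substream);
 *		state = substream->runtime->status->state;
 *		snd_pcm_stream_unlock(substream);
 *		return state;
 *	}
 */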
139
140 /**
141 * snd_pcm_stream_lock_irq - Lock the PCM stream
142 * @substream: PCM substream
143 *
144 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
145  * IRQ (only when nonatomic is false). In the nonatomic case, this is
146  * identical to snd_pcm_stream_lock().
147 */
148 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
149 {
150 if (!substream->pcm->nonatomic)
151 local_irq_disable();
152 snd_pcm_stream_lock(substream);
153 }
154 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
155
156 /**
157 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
158 * @substream: PCM substream
159 *
160 * This is a counter-part of snd_pcm_stream_lock_irq().
161 */
162 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
163 {
164 snd_pcm_stream_unlock(substream);
165 if (!substream->pcm->nonatomic)
166 local_irq_enable();
167 }
168 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
169
170 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
171 {
172 unsigned long flags = 0;
173 if (!substream->pcm->nonatomic)
174 local_irq_save(flags);
175 snd_pcm_stream_lock(substream);
176 return flags;
177 }
178 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
179
180 /**
181 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
182 * @substream: PCM substream
183 * @flags: irq flags
184 *
185 * This is a counter-part of snd_pcm_stream_lock_irqsave().
186 */
187 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
188 unsigned long flags)
189 {
190 snd_pcm_stream_unlock(substream);
191 if (!substream->pcm->nonatomic)
192 local_irq_restore(flags);
193 }
194 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
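/*
 * Editor's illustrative sketch (not part of the original file): the irqsave
 * variants are meant for callers that may run in any context; a hypothetical
 * my_stop_on_error() helper could look like:
 *
 *	static void my_stop_on_error(struct snd_pcm_substream *substream)
 *	{
 *		unsigned long flags;
 *
 *		snd_pcm_stream_lock_irqsave(substream, flags);
 *		if (snd_pcm_running(substream))
 *			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 *		snd_pcm_stream_unlock_irqrestore(substream, flags);
 *	}
 */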
195
196 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
197 {
198 struct snd_pcm_runtime *runtime;
199 struct snd_pcm *pcm = substream->pcm;
200 struct snd_pcm_str *pstr = substream->pstr;
201
202 memset(info, 0, sizeof(*info));
203 info->card = pcm->card->number;
204 info->device = pcm->device;
205 info->stream = substream->stream;
206 info->subdevice = substream->number;
207 strlcpy(info->id, pcm->id, sizeof(info->id));
208 strlcpy(info->name, pcm->name, sizeof(info->name));
209 info->dev_class = pcm->dev_class;
210 info->dev_subclass = pcm->dev_subclass;
211 info->subdevices_count = pstr->substream_count;
212 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
213 strlcpy(info->subname, substream->name, sizeof(info->subname));
214 runtime = substream->runtime;
215
216 return 0;
217 }
218
219 int snd_pcm_info_user(struct snd_pcm_substream *substream,
220 struct snd_pcm_info __user * _info)
221 {
222 struct snd_pcm_info *info;
223 int err;
224
225 info = kmalloc(sizeof(*info), GFP_KERNEL);
226 if (! info)
227 return -ENOMEM;
228 err = snd_pcm_info(substream, info);
229 if (err >= 0) {
230 if (copy_to_user(_info, info, sizeof(*info)))
231 err = -EFAULT;
232 }
233 kfree(info);
234 return err;
235 }
236
237 static bool hw_support_mmap(struct snd_pcm_substream *substream)
238 {
239 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
240 return false;
241 /* architecture supports dma_mmap_coherent()? */
242 #if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
243 if (!substream->ops->mmap &&
244 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
245 return false;
246 #endif
247 return true;
248 }
249
250 static int constrain_mask_params(struct snd_pcm_substream *substream,
251 struct snd_pcm_hw_params *params)
252 {
253 struct snd_pcm_hw_constraints *constrs =
254 &substream->runtime->hw_constraints;
255 struct snd_mask *m;
256 unsigned int k;
257 struct snd_mask old_mask;
258 int changed;
259
260 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
261 m = hw_param_mask(params, k);
262 if (snd_mask_empty(m))
263 return -EINVAL;
264
265 		/* The caller did not request to change this parameter. */
266 if (!(params->rmask & (1 << k)))
267 continue;
268
269 if (trace_hw_mask_param_enabled())
270 old_mask = *m;
271
272 changed = snd_mask_refine(m, constrs_mask(constrs, k));
273 if (changed < 0)
274 return changed;
275 if (changed == 0)
276 continue;
277
278 /* Set corresponding flag so that the caller gets it. */
279 trace_hw_mask_param(substream, k, 0, &old_mask, m);
280 params->cmask |= 1 << k;
281 }
282
283 return 0;
284 }
285
286 static int constrain_interval_params(struct snd_pcm_substream *substream,
287 struct snd_pcm_hw_params *params)
288 {
289 struct snd_pcm_hw_constraints *constrs =
290 &substream->runtime->hw_constraints;
291 struct snd_interval *i;
292 unsigned int k;
293 struct snd_interval old_interval;
294 int changed;
295
296 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
297 i = hw_param_interval(params, k);
298 if (snd_interval_empty(i))
299 return -EINVAL;
300
301 		/* The caller did not request to change this parameter. */
302 if (!(params->rmask & (1 << k)))
303 continue;
304
305 if (trace_hw_interval_param_enabled())
306 old_interval = *i;
307
308 changed = snd_interval_refine(i, constrs_interval(constrs, k));
309 if (changed < 0)
310 return changed;
311 if (changed == 0)
312 continue;
313
314 /* Set corresponding flag so that the caller gets it. */
315 trace_hw_interval_param(substream, k, 0, &old_interval, i);
316 params->cmask |= 1 << k;
317 }
318
319 return 0;
320 }
321
322 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
323 struct snd_pcm_hw_params *params)
324 {
325 struct snd_pcm_hw_constraints *constrs =
326 &substream->runtime->hw_constraints;
327 unsigned int k;
328 unsigned int rstamps[constrs->rules_num];
329 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
330 unsigned int stamp;
331 struct snd_pcm_hw_rule *r;
332 unsigned int d;
333 struct snd_mask old_mask;
334 struct snd_interval old_interval;
335 bool again;
336 int changed;
337
338 /*
339 	 * Each application of a rule has its own sequence number.
340 	 *
341 	 * Each member of the 'rstamps' array holds the sequence number of the
342 	 * most recent application of the corresponding rule.
343 */
344 for (k = 0; k < constrs->rules_num; k++)
345 rstamps[k] = 0;
346
347 /*
348 	 * Each member of the 'vstamps' array holds the sequence number of the
349 	 * most recent rule application in which the corresponding parameter was
350 	 * changed.
351 	 *
352 	 * Initially, the elements corresponding to the parameters requested by
353 	 * the caller are set to 1. For unrequested parameters, the members are
354 	 * set to 0 so that those parameters are never changed.
355 */
356 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
357 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
358
359 /* Due to the above design, actual sequence number starts at 2. */
360 stamp = 2;
361 retry:
362 /* Apply all rules in order. */
363 again = false;
364 for (k = 0; k < constrs->rules_num; k++) {
365 r = &constrs->rules[k];
366
367 /*
368 		 * Check the condition bits of this rule. When the rule has
369 		 * condition bits and none of them are set in the requested
370 		 * flags, the rule is skipped. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
371 		 * is an example of such a condition bit.
372 */
373 if (r->cond && !(r->cond & params->flags))
374 continue;
375
376 /*
377 		 * The 'deps' array includes at most three dependencies
378 		 * on SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
379 		 * member of this array is a sentinel and should be a
380 		 * negative value.
381 		 *
382 		 * The rule needs to be applied now only if any of its dependent
383 		 * parameters were changed by earlier applications of the other
384 		 * rules.
385 */
386 for (d = 0; r->deps[d] >= 0; d++) {
387 if (vstamps[r->deps[d]] > rstamps[k])
388 break;
389 }
390 if (r->deps[d] < 0)
391 continue;
392
393 if (trace_hw_mask_param_enabled()) {
394 if (hw_is_mask(r->var))
395 old_mask = *hw_param_mask(params, r->var);
396 }
397 if (trace_hw_interval_param_enabled()) {
398 if (hw_is_interval(r->var))
399 old_interval = *hw_param_interval(params, r->var);
400 }
401
402 changed = r->func(params, r);
403 if (changed < 0)
404 return changed;
405
406 /*
407 		 * When the parameter is changed, notify the caller via the
408 		 * corresponding returned bit, then prepare for the next
409 		 * iteration.
410 */
411 if (changed && r->var >= 0) {
412 if (hw_is_mask(r->var)) {
413 trace_hw_mask_param(substream, r->var,
414 k + 1, &old_mask,
415 hw_param_mask(params, r->var));
416 }
417 if (hw_is_interval(r->var)) {
418 trace_hw_interval_param(substream, r->var,
419 k + 1, &old_interval,
420 hw_param_interval(params, r->var));
421 }
422
423 params->cmask |= (1 << r->var);
424 vstamps[r->var] = stamp;
425 again = true;
426 }
427
428 rstamps[k] = stamp++;
429 }
430
431 	/* Iterate over all rules until no parameters are changed. */
432 if (again)
433 goto retry;
434
435 return 0;
436 }
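/*
 * Editor's worked example (not part of the original file): if only
 * SNDRV_PCM_HW_PARAM_RATE is set in rmask, vstamps[RATE] starts at 1 and
 * every rstamps[] entry at 0, so any rule depending on RATE is applied on
 * the first pass.  If such a rule narrows e.g. PERIOD_SIZE, the
 * PERIOD_SIZE vstamp is set to the current stamp and 'again' is set, so
 * rules depending on PERIOD_SIZE are evaluated again later in the same or
 * the next pass; the loop ends once a full pass changes nothing.
 */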
437
438 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
439 struct snd_pcm_hw_params *params)
440 {
441 const struct snd_interval *i;
442 const struct snd_mask *m;
443 int err;
444
445 if (!params->msbits) {
446 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
447 if (snd_interval_single(i))
448 params->msbits = snd_interval_value(i);
449 }
450
451 if (!params->rate_den) {
452 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
453 if (snd_interval_single(i)) {
454 params->rate_num = snd_interval_value(i);
455 params->rate_den = 1;
456 }
457 }
458
459 if (!params->fifo_size) {
460 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
461 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
462 if (snd_mask_single(m) && snd_interval_single(i)) {
463 err = substream->ops->ioctl(substream,
464 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
465 if (err < 0)
466 return err;
467 }
468 }
469
470 if (!params->info) {
471 params->info = substream->runtime->hw.info;
472 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
473 SNDRV_PCM_INFO_DRAIN_TRIGGER);
474 if (!hw_support_mmap(substream))
475 params->info &= ~(SNDRV_PCM_INFO_MMAP |
476 SNDRV_PCM_INFO_MMAP_VALID);
477 }
478
479 return 0;
480 }
481
482 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
483 struct snd_pcm_hw_params *params)
484 {
485 int err;
486
487 params->info = 0;
488 params->fifo_size = 0;
489 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
490 params->msbits = 0;
491 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
492 params->rate_num = 0;
493 params->rate_den = 0;
494 }
495
496 err = constrain_mask_params(substream, params);
497 if (err < 0)
498 return err;
499
500 err = constrain_interval_params(substream, params);
501 if (err < 0)
502 return err;
503
504 err = constrain_params_by_rules(substream, params);
505 if (err < 0)
506 return err;
507
508 params->rmask = 0;
509
510 return 0;
511 }
512 EXPORT_SYMBOL(snd_pcm_hw_refine);
513
514 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
515 struct snd_pcm_hw_params __user * _params)
516 {
517 struct snd_pcm_hw_params *params;
518 int err;
519
520 params = memdup_user(_params, sizeof(*params));
521 if (IS_ERR(params))
522 return PTR_ERR(params);
523
524 err = snd_pcm_hw_refine(substream, params);
525 if (err < 0)
526 goto end;
527
528 err = fixup_unreferenced_params(substream, params);
529 if (err < 0)
530 goto end;
531
532 if (copy_to_user(_params, params, sizeof(*params)))
533 err = -EFAULT;
534 end:
535 kfree(params);
536 return err;
537 }
538
539 static int period_to_usecs(struct snd_pcm_runtime *runtime)
540 {
541 int usecs;
542
543 if (! runtime->rate)
544 return -1; /* invalid */
545
546 /* take 75% of period time as the deadline */
547 usecs = (750000 / runtime->rate) * runtime->period_size;
548 usecs += ((750000 % runtime->rate) * runtime->period_size) /
549 runtime->rate;
550
551 return usecs;
552 }
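/*
 * Editor's worked example (not part of the original file): with
 * rate = 48000 Hz and period_size = 1024 frames, the period is about
 * 21333 us and the function returns 75% of that:
 *
 *	(750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 *	= 15 * 1024 + (30000 * 1024) / 48000
 *	= 15360 + 640 = 16000 us
 */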
553
554 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
555 {
556 snd_pcm_stream_lock_irq(substream);
557 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
558 substream->runtime->status->state = state;
559 snd_pcm_stream_unlock_irq(substream);
560 }
561
562 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
563 int event)
564 {
565 #ifdef CONFIG_SND_PCM_TIMER
566 if (substream->timer)
567 snd_timer_notify(substream->timer, event,
568 &substream->runtime->trigger_tstamp);
569 #endif
570 }
571
572 /**
573  * snd_pcm_hw_params_choose - choose a configuration defined by @params
574 * @pcm: PCM instance
575 * @params: the hw_params instance
576 *
577 * Choose one configuration from configuration space defined by @params.
578 * The configuration chosen is that obtained fixing in this order:
579 * first access, first format, first subformat, min channels,
580 * min rate, min period time, max buffer size, min tick time
581 *
582 * Return: Zero if successful, or a negative error code on failure.
583 */
584 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
585 struct snd_pcm_hw_params *params)
586 {
587 static const int vars[] = {
588 SNDRV_PCM_HW_PARAM_ACCESS,
589 SNDRV_PCM_HW_PARAM_FORMAT,
590 SNDRV_PCM_HW_PARAM_SUBFORMAT,
591 SNDRV_PCM_HW_PARAM_CHANNELS,
592 SNDRV_PCM_HW_PARAM_RATE,
593 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
594 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
595 SNDRV_PCM_HW_PARAM_TICK_TIME,
596 -1
597 };
598 const int *v;
599 struct snd_mask old_mask;
600 struct snd_interval old_interval;
601 int changed;
602
603 for (v = vars; *v != -1; v++) {
604 /* Keep old parameter to trace. */
605 if (trace_hw_mask_param_enabled()) {
606 if (hw_is_mask(*v))
607 old_mask = *hw_param_mask(params, *v);
608 }
609 if (trace_hw_interval_param_enabled()) {
610 if (hw_is_interval(*v))
611 old_interval = *hw_param_interval(params, *v);
612 }
613 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
614 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
615 else
616 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
617 if (snd_BUG_ON(changed < 0))
618 return changed;
619 if (changed == 0)
620 continue;
621
622 /* Trace the changed parameter. */
623 if (hw_is_mask(*v)) {
624 trace_hw_mask_param(pcm, *v, 0, &old_mask,
625 hw_param_mask(params, *v));
626 }
627 if (hw_is_interval(*v)) {
628 trace_hw_interval_param(pcm, *v, 0, &old_interval,
629 hw_param_interval(params, *v));
630 }
631 }
632
633 return 0;
634 }
635
636 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
637 struct snd_pcm_hw_params *params)
638 {
639 struct snd_pcm_runtime *runtime;
640 int err, usecs;
641 unsigned int bits;
642 snd_pcm_uframes_t frames;
643
644 if (PCM_RUNTIME_CHECK(substream))
645 return -ENXIO;
646 runtime = substream->runtime;
647 snd_pcm_stream_lock_irq(substream);
648 switch (runtime->status->state) {
649 case SNDRV_PCM_STATE_OPEN:
650 case SNDRV_PCM_STATE_SETUP:
651 case SNDRV_PCM_STATE_PREPARED:
652 break;
653 default:
654 snd_pcm_stream_unlock_irq(substream);
655 return -EBADFD;
656 }
657 snd_pcm_stream_unlock_irq(substream);
658 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
659 if (!substream->oss.oss)
660 #endif
661 if (atomic_read(&substream->mmap_count))
662 return -EBADFD;
663
664 params->rmask = ~0U;
665 err = snd_pcm_hw_refine(substream, params);
666 if (err < 0)
667 goto _error;
668
669 err = snd_pcm_hw_params_choose(substream, params);
670 if (err < 0)
671 goto _error;
672
673 err = fixup_unreferenced_params(substream, params);
674 if (err < 0)
675 goto _error;
676
677 if (substream->ops->hw_params != NULL) {
678 err = substream->ops->hw_params(substream, params);
679 if (err < 0)
680 goto _error;
681 }
682
683 runtime->access = params_access(params);
684 runtime->format = params_format(params);
685 runtime->subformat = params_subformat(params);
686 runtime->channels = params_channels(params);
687 runtime->rate = params_rate(params);
688 runtime->period_size = params_period_size(params);
689 runtime->periods = params_periods(params);
690 runtime->buffer_size = params_buffer_size(params);
691 runtime->info = params->info;
692 runtime->rate_num = params->rate_num;
693 runtime->rate_den = params->rate_den;
694 runtime->no_period_wakeup =
695 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
696 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
697
698 bits = snd_pcm_format_physical_width(runtime->format);
699 runtime->sample_bits = bits;
700 bits *= runtime->channels;
701 runtime->frame_bits = bits;
702 frames = 1;
703 while (bits % 8 != 0) {
704 bits *= 2;
705 frames *= 2;
706 }
707 runtime->byte_align = bits / 8;
708 runtime->min_align = frames;
709
710 /* Default sw params */
711 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
712 runtime->period_step = 1;
713 runtime->control->avail_min = runtime->period_size;
714 runtime->start_threshold = 1;
715 runtime->stop_threshold = runtime->buffer_size;
716 runtime->silence_threshold = 0;
717 runtime->silence_size = 0;
718 runtime->boundary = runtime->buffer_size;
719 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
720 runtime->boundary *= 2;
721
722 snd_pcm_timer_resolution_change(substream);
723 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
724
725 if (pm_qos_request_active(&substream->latency_pm_qos_req))
726 pm_qos_remove_request(&substream->latency_pm_qos_req);
727 if ((usecs = period_to_usecs(runtime)) >= 0)
728 pm_qos_add_request(&substream->latency_pm_qos_req,
729 PM_QOS_CPU_DMA_LATENCY, usecs);
730 return 0;
731 _error:
732 	/* the hardware might be unusable from this point on,
733 	   so we force the application to retry setting
734 	   the correct hardware parameters */
735 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
736 if (substream->ops->hw_free != NULL)
737 substream->ops->hw_free(substream);
738 return err;
739 }
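/*
 * Editor's worked example (not part of the original file): for S16_LE
 * stereo the physical sample width is 16 bits, so sample_bits = 16,
 * frame_bits = 32, byte_align = 4 and min_align = 1 frame.  With
 * buffer_size = 16384 frames, the boundary loop above keeps doubling
 * 16384 while the doubled value still fits below LONG_MAX - buffer_size,
 * ending at 2^62 frames on a 64-bit kernel.
 */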
740
741 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
742 struct snd_pcm_hw_params __user * _params)
743 {
744 struct snd_pcm_hw_params *params;
745 int err;
746
747 params = memdup_user(_params, sizeof(*params));
748 if (IS_ERR(params))
749 return PTR_ERR(params);
750
751 err = snd_pcm_hw_params(substream, params);
752 if (err < 0)
753 goto end;
754
755 if (copy_to_user(_params, params, sizeof(*params)))
756 err = -EFAULT;
757 end:
758 kfree(params);
759 return err;
760 }
761
762 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
763 {
764 struct snd_pcm_runtime *runtime;
765 int result = 0;
766
767 if (PCM_RUNTIME_CHECK(substream))
768 return -ENXIO;
769 runtime = substream->runtime;
770 snd_pcm_stream_lock_irq(substream);
771 switch (runtime->status->state) {
772 case SNDRV_PCM_STATE_SETUP:
773 case SNDRV_PCM_STATE_PREPARED:
774 break;
775 default:
776 snd_pcm_stream_unlock_irq(substream);
777 return -EBADFD;
778 }
779 snd_pcm_stream_unlock_irq(substream);
780 if (atomic_read(&substream->mmap_count))
781 return -EBADFD;
782 if (substream->ops->hw_free)
783 result = substream->ops->hw_free(substream);
784 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
785 pm_qos_remove_request(&substream->latency_pm_qos_req);
786 return result;
787 }
788
789 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
790 struct snd_pcm_sw_params *params)
791 {
792 struct snd_pcm_runtime *runtime;
793 int err;
794
795 if (PCM_RUNTIME_CHECK(substream))
796 return -ENXIO;
797 runtime = substream->runtime;
798 snd_pcm_stream_lock_irq(substream);
799 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
800 snd_pcm_stream_unlock_irq(substream);
801 return -EBADFD;
802 }
803 snd_pcm_stream_unlock_irq(substream);
804
805 if (params->tstamp_mode < 0 ||
806 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
807 return -EINVAL;
808 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
809 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
810 return -EINVAL;
811 if (params->avail_min == 0)
812 return -EINVAL;
813 if (params->silence_size >= runtime->boundary) {
814 if (params->silence_threshold != 0)
815 return -EINVAL;
816 } else {
817 if (params->silence_size > params->silence_threshold)
818 return -EINVAL;
819 if (params->silence_threshold > runtime->buffer_size)
820 return -EINVAL;
821 }
822 err = 0;
823 snd_pcm_stream_lock_irq(substream);
824 runtime->tstamp_mode = params->tstamp_mode;
825 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
826 runtime->tstamp_type = params->tstamp_type;
827 runtime->period_step = params->period_step;
828 runtime->control->avail_min = params->avail_min;
829 runtime->start_threshold = params->start_threshold;
830 runtime->stop_threshold = params->stop_threshold;
831 runtime->silence_threshold = params->silence_threshold;
832 runtime->silence_size = params->silence_size;
833 params->boundary = runtime->boundary;
834 if (snd_pcm_running(substream)) {
835 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
836 runtime->silence_size > 0)
837 snd_pcm_playback_silence(substream, ULONG_MAX);
838 err = snd_pcm_update_state(substream, runtime);
839 }
840 snd_pcm_stream_unlock_irq(substream);
841 return err;
842 }
843
844 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
845 struct snd_pcm_sw_params __user * _params)
846 {
847 struct snd_pcm_sw_params params;
848 int err;
849 if (copy_from_user(&params, _params, sizeof(params)))
850 return -EFAULT;
851 err = snd_pcm_sw_params(substream, &params);
852 if (copy_to_user(_params, &params, sizeof(params)))
853 return -EFAULT;
854 return err;
855 }
856
857 int snd_pcm_status(struct snd_pcm_substream *substream,
858 struct snd_pcm_status *status)
859 {
860 struct snd_pcm_runtime *runtime = substream->runtime;
861
862 snd_pcm_stream_lock_irq(substream);
863
864 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
865 &runtime->audio_tstamp_config);
866
867 /* backwards compatible behavior */
868 if (runtime->audio_tstamp_config.type_requested ==
869 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
870 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
871 runtime->audio_tstamp_config.type_requested =
872 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
873 else
874 runtime->audio_tstamp_config.type_requested =
875 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
876 runtime->audio_tstamp_report.valid = 0;
877 } else
878 runtime->audio_tstamp_report.valid = 1;
879
880 status->state = runtime->status->state;
881 status->suspended_state = runtime->status->suspended_state;
882 if (status->state == SNDRV_PCM_STATE_OPEN)
883 goto _end;
884 status->trigger_tstamp = runtime->trigger_tstamp;
885 if (snd_pcm_running(substream)) {
886 snd_pcm_update_hw_ptr(substream);
887 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
888 status->tstamp = runtime->status->tstamp;
889 status->driver_tstamp = runtime->driver_tstamp;
890 status->audio_tstamp =
891 runtime->status->audio_tstamp;
892 if (runtime->audio_tstamp_report.valid == 1)
893 /* backwards compatibility, no report provided in COMPAT mode */
894 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
895 &status->audio_tstamp_accuracy,
896 &runtime->audio_tstamp_report);
897
898 goto _tstamp_end;
899 }
900 } else {
901 /* get tstamp only in fallback mode and only if enabled */
902 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
903 snd_pcm_gettime(runtime, &status->tstamp);
904 }
905 _tstamp_end:
906 status->appl_ptr = runtime->control->appl_ptr;
907 status->hw_ptr = runtime->status->hw_ptr;
908 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
909 status->avail = snd_pcm_playback_avail(runtime);
910 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
911 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
912 status->delay = runtime->buffer_size - status->avail;
913 status->delay += runtime->delay;
914 } else
915 status->delay = 0;
916 } else {
917 status->avail = snd_pcm_capture_avail(runtime);
918 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
919 status->delay = status->avail + runtime->delay;
920 else
921 status->delay = 0;
922 }
923 status->avail_max = runtime->avail_max;
924 status->overrange = runtime->overrange;
925 runtime->avail_max = 0;
926 runtime->overrange = 0;
927 _end:
928 snd_pcm_stream_unlock_irq(substream);
929 return 0;
930 }
931
932 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
933 struct snd_pcm_status __user * _status,
934 bool ext)
935 {
936 struct snd_pcm_status status;
937 int res;
938
939 memset(&status, 0, sizeof(status));
940 /*
941 * with extension, parameters are read/write,
942 * get audio_tstamp_data from user,
943 * ignore rest of status structure
944 */
945 if (ext && get_user(status.audio_tstamp_data,
946 (u32 __user *)(&_status->audio_tstamp_data)))
947 return -EFAULT;
948 res = snd_pcm_status(substream, &status);
949 if (res < 0)
950 return res;
951 if (copy_to_user(_status, &status, sizeof(status)))
952 return -EFAULT;
953 return 0;
954 }
955
956 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
957 struct snd_pcm_channel_info * info)
958 {
959 struct snd_pcm_runtime *runtime;
960 unsigned int channel;
961
962 channel = info->channel;
963 runtime = substream->runtime;
964 snd_pcm_stream_lock_irq(substream);
965 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
966 snd_pcm_stream_unlock_irq(substream);
967 return -EBADFD;
968 }
969 snd_pcm_stream_unlock_irq(substream);
970 if (channel >= runtime->channels)
971 return -EINVAL;
972 memset(info, 0, sizeof(*info));
973 info->channel = channel;
974 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
975 }
976
977 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
978 struct snd_pcm_channel_info __user * _info)
979 {
980 struct snd_pcm_channel_info info;
981 int res;
982
983 if (copy_from_user(&info, _info, sizeof(info)))
984 return -EFAULT;
985 res = snd_pcm_channel_info(substream, &info);
986 if (res < 0)
987 return res;
988 if (copy_to_user(_info, &info, sizeof(info)))
989 return -EFAULT;
990 return 0;
991 }
992
993 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
994 {
995 struct snd_pcm_runtime *runtime = substream->runtime;
996 if (runtime->trigger_master == NULL)
997 return;
998 if (runtime->trigger_master == substream) {
999 if (!runtime->trigger_tstamp_latched)
1000 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1001 } else {
1002 snd_pcm_trigger_tstamp(runtime->trigger_master);
1003 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1004 }
1005 runtime->trigger_master = NULL;
1006 }
1007
1008 struct action_ops {
1009 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1010 int (*do_action)(struct snd_pcm_substream *substream, int state);
1011 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1012 void (*post_action)(struct snd_pcm_substream *substream, int state);
1013 };
1014
1015 /*
1016  * this function is the core for handling linked streams
1017  * Note: the stream state might be changed even on failure
1018  * Note2: call with the calling stream's lock + the link lock held
1019 */
1020 static int snd_pcm_action_group(const struct action_ops *ops,
1021 struct snd_pcm_substream *substream,
1022 int state, int do_lock)
1023 {
1024 struct snd_pcm_substream *s = NULL;
1025 struct snd_pcm_substream *s1;
1026 int res = 0, depth = 1;
1027
1028 snd_pcm_group_for_each_entry(s, substream) {
1029 if (do_lock && s != substream) {
1030 if (s->pcm->nonatomic)
1031 mutex_lock_nested(&s->self_group.mutex, depth);
1032 else
1033 spin_lock_nested(&s->self_group.lock, depth);
1034 depth++;
1035 }
1036 res = ops->pre_action(s, state);
1037 if (res < 0)
1038 goto _unlock;
1039 }
1040 snd_pcm_group_for_each_entry(s, substream) {
1041 res = ops->do_action(s, state);
1042 if (res < 0) {
1043 if (ops->undo_action) {
1044 snd_pcm_group_for_each_entry(s1, substream) {
1045 if (s1 == s) /* failed stream */
1046 break;
1047 ops->undo_action(s1, state);
1048 }
1049 }
1050 s = NULL; /* unlock all */
1051 goto _unlock;
1052 }
1053 }
1054 snd_pcm_group_for_each_entry(s, substream) {
1055 ops->post_action(s, state);
1056 }
1057 _unlock:
1058 if (do_lock) {
1059 /* unlock streams */
1060 snd_pcm_group_for_each_entry(s1, substream) {
1061 if (s1 != substream) {
1062 if (s1->pcm->nonatomic)
1063 mutex_unlock(&s1->self_group.mutex);
1064 else
1065 spin_unlock(&s1->self_group.lock);
1066 }
1067 if (s1 == s) /* end */
1068 break;
1069 }
1070 }
1071 return res;
1072 }
1073
1074 /*
1075 * Note: call with stream lock
1076 */
1077 static int snd_pcm_action_single(const struct action_ops *ops,
1078 struct snd_pcm_substream *substream,
1079 int state)
1080 {
1081 int res;
1082
1083 res = ops->pre_action(substream, state);
1084 if (res < 0)
1085 return res;
1086 res = ops->do_action(substream, state);
1087 if (res == 0)
1088 ops->post_action(substream, state);
1089 else if (ops->undo_action)
1090 ops->undo_action(substream, state);
1091 return res;
1092 }
1093
1094 /*
1095 * Note: call with stream lock
1096 */
1097 static int snd_pcm_action(const struct action_ops *ops,
1098 struct snd_pcm_substream *substream,
1099 int state)
1100 {
1101 int res;
1102
1103 if (!snd_pcm_stream_linked(substream))
1104 return snd_pcm_action_single(ops, substream, state);
1105
1106 if (substream->pcm->nonatomic) {
1107 if (!mutex_trylock(&substream->group->mutex)) {
1108 mutex_unlock(&substream->self_group.mutex);
1109 mutex_lock(&substream->group->mutex);
1110 mutex_lock(&substream->self_group.mutex);
1111 }
1112 res = snd_pcm_action_group(ops, substream, state, 1);
1113 mutex_unlock(&substream->group->mutex);
1114 } else {
1115 if (!spin_trylock(&substream->group->lock)) {
1116 spin_unlock(&substream->self_group.lock);
1117 spin_lock(&substream->group->lock);
1118 spin_lock(&substream->self_group.lock);
1119 }
1120 res = snd_pcm_action_group(ops, substream, state, 1);
1121 spin_unlock(&substream->group->lock);
1122 }
1123 return res;
1124 }
1125
1126 /*
1127  * Note: don't take any locks before calling this
1128 */
1129 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1130 struct snd_pcm_substream *substream,
1131 int state)
1132 {
1133 int res;
1134
1135 snd_pcm_stream_lock_irq(substream);
1136 res = snd_pcm_action(ops, substream, state);
1137 snd_pcm_stream_unlock_irq(substream);
1138 return res;
1139 }
1140
1141 /*
1142 */
1143 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1144 struct snd_pcm_substream *substream,
1145 int state)
1146 {
1147 int res;
1148
1149 down_read(&snd_pcm_link_rwsem);
1150 if (snd_pcm_stream_linked(substream))
1151 res = snd_pcm_action_group(ops, substream, state, 0);
1152 else
1153 res = snd_pcm_action_single(ops, substream, state);
1154 up_read(&snd_pcm_link_rwsem);
1155 return res;
1156 }
1157
1158 /*
1159 * start callbacks
1160 */
1161 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1162 {
1163 struct snd_pcm_runtime *runtime = substream->runtime;
1164 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1165 return -EBADFD;
1166 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1167 !snd_pcm_playback_data(substream))
1168 return -EPIPE;
1169 runtime->trigger_tstamp_latched = false;
1170 runtime->trigger_master = substream;
1171 return 0;
1172 }
1173
1174 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1175 {
1176 if (substream->runtime->trigger_master != substream)
1177 return 0;
1178 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1179 }
1180
1181 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1182 {
1183 if (substream->runtime->trigger_master == substream)
1184 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1185 }
1186
1187 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1188 {
1189 struct snd_pcm_runtime *runtime = substream->runtime;
1190 snd_pcm_trigger_tstamp(substream);
1191 runtime->hw_ptr_jiffies = jiffies;
1192 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1193 runtime->rate;
1194 runtime->status->state = state;
1195 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1196 runtime->silence_size > 0)
1197 snd_pcm_playback_silence(substream, ULONG_MAX);
1198 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1199 }
1200
1201 static const struct action_ops snd_pcm_action_start = {
1202 .pre_action = snd_pcm_pre_start,
1203 .do_action = snd_pcm_do_start,
1204 .undo_action = snd_pcm_undo_start,
1205 .post_action = snd_pcm_post_start
1206 };
1207
1208 /**
1209 * snd_pcm_start - start all linked streams
1210 * @substream: the PCM substream instance
1211 *
1212 * Return: Zero if successful, or a negative error code.
1213 * The stream lock must be acquired before calling this function.
1214 */
1215 int snd_pcm_start(struct snd_pcm_substream *substream)
1216 {
1217 return snd_pcm_action(&snd_pcm_action_start, substream,
1218 SNDRV_PCM_STATE_RUNNING);
1219 }
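/*
 * Editor's illustrative sketch (not part of the original file): since
 * snd_pcm_start() expects the stream lock to be held, a hypothetical
 * caller outside this file would wrap it like:
 *
 *	snd_pcm_stream_lock_irq(substream);
 *	if (substream->runtime->status->state == SNDRV_PCM_STATE_PREPARED)
 *		err = snd_pcm_start(substream);
 *	snd_pcm_stream_unlock_irq(substream);
 */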
1220
1221 /* take the stream lock and start the streams */
1222 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1223 {
1224 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1225 SNDRV_PCM_STATE_RUNNING);
1226 }
1227
1228 /*
1229 * stop callbacks
1230 */
1231 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1232 {
1233 struct snd_pcm_runtime *runtime = substream->runtime;
1234 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1235 return -EBADFD;
1236 runtime->trigger_master = substream;
1237 return 0;
1238 }
1239
1240 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1241 {
1242 if (substream->runtime->trigger_master == substream &&
1243 snd_pcm_running(substream))
1244 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1245 	return 0; /* unconditionally stop all substreams */
1246 }
1247
1248 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1249 {
1250 struct snd_pcm_runtime *runtime = substream->runtime;
1251 if (runtime->status->state != state) {
1252 snd_pcm_trigger_tstamp(substream);
1253 runtime->status->state = state;
1254 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1255 }
1256 wake_up(&runtime->sleep);
1257 wake_up(&runtime->tsleep);
1258 }
1259
1260 static const struct action_ops snd_pcm_action_stop = {
1261 .pre_action = snd_pcm_pre_stop,
1262 .do_action = snd_pcm_do_stop,
1263 .post_action = snd_pcm_post_stop
1264 };
1265
1266 /**
1267 * snd_pcm_stop - try to stop all running streams in the substream group
1268 * @substream: the PCM substream instance
1269 * @state: PCM state after stopping the stream
1270 *
1271 * The state of each stream is then changed to the given state unconditionally.
1272 *
1273 * Return: Zero if successful, or a negative error code.
1274 */
1275 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1276 {
1277 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1278 }
1279 EXPORT_SYMBOL(snd_pcm_stop);
1280
1281 /**
1282 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1283 * @substream: the PCM substream
1284 *
1285 * After stopping, the state is changed to SETUP.
1286 * Unlike snd_pcm_stop(), this affects only the given stream.
1287 *
1288  * Return: Zero if successful, or a negative error code.
1289 */
1290 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1291 {
1292 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1293 SNDRV_PCM_STATE_SETUP);
1294 }
1295
1296 /**
1297 * snd_pcm_stop_xrun - stop the running streams as XRUN
1298 * @substream: the PCM substream instance
1299 *
1300 * This stops the given running substream (and all linked substreams) as XRUN.
1301 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1302 *
1303 * Return: Zero if successful, or a negative error code.
1304 */
1305 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1306 {
1307 unsigned long flags;
1308 int ret = 0;
1309
1310 snd_pcm_stream_lock_irqsave(substream, flags);
1311 if (snd_pcm_running(substream))
1312 ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1313 snd_pcm_stream_unlock_irqrestore(substream, flags);
1314 return ret;
1315 }
1316 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
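/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * interrupt handler that detects a FIFO error can call the helper above
 * without taking the stream lock itself; my_irq_handler() and
 * my_fifo_error() below are hypothetical:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct snd_pcm_substream *substream = dev_id;
 *
 *		if (my_fifo_error(substream))
 *			snd_pcm_stop_xrun(substream);
 *		else
 *			snd_pcm_period_elapsed(substream);
 *		return IRQ_HANDLED;
 *	}
 */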
1317
1318 /*
1319 * pause callbacks
1320 */
1321 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1322 {
1323 struct snd_pcm_runtime *runtime = substream->runtime;
1324 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1325 return -ENOSYS;
1326 if (push) {
1327 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1328 return -EBADFD;
1329 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1330 return -EBADFD;
1331 runtime->trigger_master = substream;
1332 return 0;
1333 }
1334
1335 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1336 {
1337 if (substream->runtime->trigger_master != substream)
1338 return 0;
1339 /* some drivers might use hw_ptr to recover from the pause -
1340 update the hw_ptr now */
1341 if (push)
1342 snd_pcm_update_hw_ptr(substream);
1343 	/* The jiffies check in snd_pcm_update_hw_ptr*() is based on a delta
1344 	 * from the current jiffies; pushing hw_ptr_jiffies far into the past
1345 	 * gives a large enough delta to effectively skip the check once.
1346 */
1347 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1348 return substream->ops->trigger(substream,
1349 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1350 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1351 }
1352
1353 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1354 {
1355 if (substream->runtime->trigger_master == substream)
1356 substream->ops->trigger(substream,
1357 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1358 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1359 }
1360
1361 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1362 {
1363 struct snd_pcm_runtime *runtime = substream->runtime;
1364 snd_pcm_trigger_tstamp(substream);
1365 if (push) {
1366 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1367 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1368 wake_up(&runtime->sleep);
1369 wake_up(&runtime->tsleep);
1370 } else {
1371 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1372 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1373 }
1374 }
1375
1376 static const struct action_ops snd_pcm_action_pause = {
1377 .pre_action = snd_pcm_pre_pause,
1378 .do_action = snd_pcm_do_pause,
1379 .undo_action = snd_pcm_undo_pause,
1380 .post_action = snd_pcm_post_pause
1381 };
1382
1383 /*
1384 * Push/release the pause for all linked streams.
1385 */
1386 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1387 {
1388 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1389 }
1390
1391 #ifdef CONFIG_PM
1392 /* suspend */
1393
1394 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1395 {
1396 struct snd_pcm_runtime *runtime = substream->runtime;
1397 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1398 return -EBUSY;
1399 runtime->trigger_master = substream;
1400 return 0;
1401 }
1402
1403 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1404 {
1405 struct snd_pcm_runtime *runtime = substream->runtime;
1406 if (runtime->trigger_master != substream)
1407 return 0;
1408 if (! snd_pcm_running(substream))
1409 return 0;
1410 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1411 return 0; /* suspend unconditionally */
1412 }
1413
1414 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1415 {
1416 struct snd_pcm_runtime *runtime = substream->runtime;
1417 snd_pcm_trigger_tstamp(substream);
1418 runtime->status->suspended_state = runtime->status->state;
1419 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1420 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1421 wake_up(&runtime->sleep);
1422 wake_up(&runtime->tsleep);
1423 }
1424
1425 static const struct action_ops snd_pcm_action_suspend = {
1426 .pre_action = snd_pcm_pre_suspend,
1427 .do_action = snd_pcm_do_suspend,
1428 .post_action = snd_pcm_post_suspend
1429 };
1430
1431 /**
1432 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1433 * @substream: the PCM substream
1434 *
1435 * After this call, all streams are changed to SUSPENDED state.
1436 *
1437 * Return: Zero if successful (or @substream is %NULL), or a negative error
1438 * code.
1439 */
1440 int snd_pcm_suspend(struct snd_pcm_substream *substream)
1441 {
1442 int err;
1443 unsigned long flags;
1444
1445 if (! substream)
1446 return 0;
1447
1448 snd_pcm_stream_lock_irqsave(substream, flags);
1449 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1450 snd_pcm_stream_unlock_irqrestore(substream, flags);
1451 return err;
1452 }
1453 EXPORT_SYMBOL(snd_pcm_suspend);
1454
1455 /**
1456 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1457 * @pcm: the PCM instance
1458 *
1459 * After this call, all streams are changed to SUSPENDED state.
1460 *
1461 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1462 */
1463 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1464 {
1465 struct snd_pcm_substream *substream;
1466 int stream, err = 0;
1467
1468 if (! pcm)
1469 return 0;
1470
1471 for (stream = 0; stream < 2; stream++) {
1472 for (substream = pcm->streams[stream].substream;
1473 substream; substream = substream->next) {
1474 /* FIXME: the open/close code should lock this as well */
1475 if (substream->runtime == NULL)
1476 continue;
1477 err = snd_pcm_suspend(substream);
1478 if (err < 0 && err != -EBUSY)
1479 return err;
1480 }
1481 }
1482 return 0;
1483 }
1484 EXPORT_SYMBOL(snd_pcm_suspend_all);
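/*
 * Editor's illustrative sketch (not part of the original file): a driver's
 * system-suspend callback typically calls the helper above for each PCM it
 * owns; struct my_chip and my_chip_suspend() are hypothetical:
 *
 *	static int my_chip_suspend(struct device *dev)
 *	{
 *		struct my_chip *chip = dev_get_drvdata(dev);
 *
 *		snd_pcm_suspend_all(chip->pcm);
 *		return 0;
 *	}
 */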
1485
1486 /* resume */
1487
1488 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1489 {
1490 struct snd_pcm_runtime *runtime = substream->runtime;
1491 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1492 return -ENOSYS;
1493 runtime->trigger_master = substream;
1494 return 0;
1495 }
1496
1497 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1498 {
1499 struct snd_pcm_runtime *runtime = substream->runtime;
1500 if (runtime->trigger_master != substream)
1501 return 0;
1502 /* DMA not running previously? */
1503 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1504 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1505 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1506 return 0;
1507 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1508 }
1509
1510 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1511 {
1512 if (substream->runtime->trigger_master == substream &&
1513 snd_pcm_running(substream))
1514 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1515 }
1516
1517 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1518 {
1519 struct snd_pcm_runtime *runtime = substream->runtime;
1520 snd_pcm_trigger_tstamp(substream);
1521 runtime->status->state = runtime->status->suspended_state;
1522 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1523 }
1524
1525 static const struct action_ops snd_pcm_action_resume = {
1526 .pre_action = snd_pcm_pre_resume,
1527 .do_action = snd_pcm_do_resume,
1528 .undo_action = snd_pcm_undo_resume,
1529 .post_action = snd_pcm_post_resume
1530 };
1531
1532 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1533 {
1534 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1535 }
1536
1537 #else
1538
1539 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1540 {
1541 return -ENOSYS;
1542 }
1543
1544 #endif /* CONFIG_PM */
1545
1546 /*
1547 * xrun ioctl
1548 *
1549 * Change the RUNNING stream(s) to XRUN state.
1550 */
1551 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1552 {
1553 struct snd_pcm_runtime *runtime = substream->runtime;
1554 int result;
1555
1556 snd_pcm_stream_lock_irq(substream);
1557 switch (runtime->status->state) {
1558 case SNDRV_PCM_STATE_XRUN:
1559 result = 0; /* already there */
1560 break;
1561 case SNDRV_PCM_STATE_RUNNING:
1562 result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
1563 break;
1564 default:
1565 result = -EBADFD;
1566 }
1567 snd_pcm_stream_unlock_irq(substream);
1568 return result;
1569 }
1570
1571 /*
1572 * reset ioctl
1573 */
1574 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1575 {
1576 struct snd_pcm_runtime *runtime = substream->runtime;
1577 switch (runtime->status->state) {
1578 case SNDRV_PCM_STATE_RUNNING:
1579 case SNDRV_PCM_STATE_PREPARED:
1580 case SNDRV_PCM_STATE_PAUSED:
1581 case SNDRV_PCM_STATE_SUSPENDED:
1582 return 0;
1583 default:
1584 return -EBADFD;
1585 }
1586 }
1587
1588 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1589 {
1590 struct snd_pcm_runtime *runtime = substream->runtime;
1591 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1592 if (err < 0)
1593 return err;
1594 runtime->hw_ptr_base = 0;
1595 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1596 runtime->status->hw_ptr % runtime->period_size;
1597 runtime->silence_start = runtime->status->hw_ptr;
1598 runtime->silence_filled = 0;
1599 return 0;
1600 }
1601
1602 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1603 {
1604 struct snd_pcm_runtime *runtime = substream->runtime;
1605 runtime->control->appl_ptr = runtime->status->hw_ptr;
1606 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1607 runtime->silence_size > 0)
1608 snd_pcm_playback_silence(substream, ULONG_MAX);
1609 }
1610
1611 static const struct action_ops snd_pcm_action_reset = {
1612 .pre_action = snd_pcm_pre_reset,
1613 .do_action = snd_pcm_do_reset,
1614 .post_action = snd_pcm_post_reset
1615 };
1616
1617 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1618 {
1619 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1620 }
1621
1622 /*
1623 * prepare ioctl
1624 */
1625 /* we use the second argument for updating f_flags */
1626 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1627 int f_flags)
1628 {
1629 struct snd_pcm_runtime *runtime = substream->runtime;
1630 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1631 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1632 return -EBADFD;
1633 if (snd_pcm_running(substream))
1634 return -EBUSY;
1635 substream->f_flags = f_flags;
1636 return 0;
1637 }
1638
1639 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1640 {
1641 int err;
1642 err = substream->ops->prepare(substream);
1643 if (err < 0)
1644 return err;
1645 return snd_pcm_do_reset(substream, 0);
1646 }
1647
1648 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1649 {
1650 struct snd_pcm_runtime *runtime = substream->runtime;
1651 runtime->control->appl_ptr = runtime->status->hw_ptr;
1652 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1653 }
1654
1655 static const struct action_ops snd_pcm_action_prepare = {
1656 .pre_action = snd_pcm_pre_prepare,
1657 .do_action = snd_pcm_do_prepare,
1658 .post_action = snd_pcm_post_prepare
1659 };
1660
1661 /**
1662 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1663 * @substream: the PCM substream instance
1664 * @file: file to refer f_flags
1665 *
1666 * Return: Zero if successful, or a negative error code.
1667 */
1668 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1669 struct file *file)
1670 {
1671 int f_flags;
1672
1673 if (file)
1674 f_flags = file->f_flags;
1675 else
1676 f_flags = substream->f_flags;
1677
1678 snd_pcm_stream_lock_irq(substream);
1679 switch (substream->runtime->status->state) {
1680 case SNDRV_PCM_STATE_PAUSED:
1681 snd_pcm_pause(substream, 0);
1682 /* fallthru */
1683 case SNDRV_PCM_STATE_SUSPENDED:
1684 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1685 break;
1686 }
1687 snd_pcm_stream_unlock_irq(substream);
1688
1689 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1690 substream, f_flags);
1691 }
1692
1693 /*
1694 * drain ioctl
1695 */
1696
1697 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1698 {
1699 struct snd_pcm_runtime *runtime = substream->runtime;
1700 switch (runtime->status->state) {
1701 case SNDRV_PCM_STATE_OPEN:
1702 case SNDRV_PCM_STATE_DISCONNECTED:
1703 case SNDRV_PCM_STATE_SUSPENDED:
1704 return -EBADFD;
1705 }
1706 runtime->trigger_master = substream;
1707 return 0;
1708 }
1709
1710 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1711 {
1712 struct snd_pcm_runtime *runtime = substream->runtime;
1713 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1714 switch (runtime->status->state) {
1715 case SNDRV_PCM_STATE_PREPARED:
1716 /* start playback stream if possible */
1717 if (! snd_pcm_playback_empty(substream)) {
1718 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1719 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1720 } else {
1721 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1722 }
1723 break;
1724 case SNDRV_PCM_STATE_RUNNING:
1725 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1726 break;
1727 case SNDRV_PCM_STATE_XRUN:
1728 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1729 break;
1730 default:
1731 break;
1732 }
1733 } else {
1734 /* stop running stream */
1735 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1736 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1737 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1738 snd_pcm_do_stop(substream, new_state);
1739 snd_pcm_post_stop(substream, new_state);
1740 }
1741 }
1742
1743 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1744 runtime->trigger_master == substream &&
1745 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1746 return substream->ops->trigger(substream,
1747 SNDRV_PCM_TRIGGER_DRAIN);
1748
1749 return 0;
1750 }
1751
1752 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1753 {
1754 }
1755
1756 static const struct action_ops snd_pcm_action_drain_init = {
1757 .pre_action = snd_pcm_pre_drain_init,
1758 .do_action = snd_pcm_do_drain_init,
1759 .post_action = snd_pcm_post_drain_init
1760 };
1761
1762 static int snd_pcm_drop(struct snd_pcm_substream *substream);
1763
1764 /*
1765 * Drain the stream(s).
1766 * When the substream is linked, sync until the draining of all playback streams
1767 * is finished.
1768  * After this call, all streams are supposed to be in either the SETUP or the
1769  * DRAINING (capture only) state.
1770 */
1771 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1772 struct file *file)
1773 {
1774 struct snd_card *card;
1775 struct snd_pcm_runtime *runtime;
1776 struct snd_pcm_substream *s;
1777 wait_queue_entry_t wait;
1778 int result = 0;
1779 int nonblock = 0;
1780
1781 card = substream->pcm->card;
1782 runtime = substream->runtime;
1783
1784 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1785 return -EBADFD;
1786
1787 if (file) {
1788 if (file->f_flags & O_NONBLOCK)
1789 nonblock = 1;
1790 } else if (substream->f_flags & O_NONBLOCK)
1791 nonblock = 1;
1792
1793 down_read(&snd_pcm_link_rwsem);
1794 snd_pcm_stream_lock_irq(substream);
1795 /* resume pause */
1796 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1797 snd_pcm_pause(substream, 0);
1798
1799 /* pre-start/stop - all running streams are changed to DRAINING state */
1800 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1801 if (result < 0)
1802 goto unlock;
1803 /* in non-blocking, we don't wait in ioctl but let caller poll */
1804 if (nonblock) {
1805 result = -EAGAIN;
1806 goto unlock;
1807 }
1808
1809 for (;;) {
1810 long tout;
1811 struct snd_pcm_runtime *to_check;
1812 if (signal_pending(current)) {
1813 result = -ERESTARTSYS;
1814 break;
1815 }
1816 /* find a substream to drain */
1817 to_check = NULL;
1818 snd_pcm_group_for_each_entry(s, substream) {
1819 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1820 continue;
1821 runtime = s->runtime;
1822 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1823 to_check = runtime;
1824 break;
1825 }
1826 }
1827 if (!to_check)
1828 break; /* all drained */
1829 init_waitqueue_entry(&wait, current);
1830 add_wait_queue(&to_check->sleep, &wait);
1831 snd_pcm_stream_unlock_irq(substream);
1832 up_read(&snd_pcm_link_rwsem);
1833 snd_power_unlock(card);
1834 if (runtime->no_period_wakeup)
1835 tout = MAX_SCHEDULE_TIMEOUT;
1836 else {
1837 tout = 10;
1838 if (runtime->rate) {
1839 long t = runtime->period_size * 2 / runtime->rate;
1840 tout = max(t, tout);
1841 }
1842 tout = msecs_to_jiffies(tout * 1000);
1843 }
1844 tout = schedule_timeout_interruptible(tout);
1845 snd_power_lock(card);
1846 down_read(&snd_pcm_link_rwsem);
1847 snd_pcm_stream_lock_irq(substream);
1848 remove_wait_queue(&to_check->sleep, &wait);
1849 if (card->shutdown) {
1850 result = -ENODEV;
1851 break;
1852 }
1853 if (tout == 0) {
1854 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1855 result = -ESTRPIPE;
1856 else {
1857 dev_dbg(substream->pcm->card->dev,
1858 "playback drain error (DMA or IRQ trouble?)\n");
1859 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1860 result = -EIO;
1861 }
1862 break;
1863 }
1864 }
1865
1866 unlock:
1867 snd_pcm_stream_unlock_irq(substream);
1868 up_read(&snd_pcm_link_rwsem);
1869
1870 return result;
1871 }
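/*
 * Example: a minimal user-space sketch of triggering a drain, assuming "fd"
 * is an already prepared or running playback PCM file descriptor (e.g.
 * /dev/snd/pcmC0D0p).  The ioctl blocks until the queued samples have been
 * played; with O_NONBLOCK it returns immediately with EAGAIN and the caller
 * is expected to poll for completion, as described above:
 *
 *	if (ioctl(fd, SNDRV_PCM_IOCTL_DRAIN) < 0)
 *		perror("SNDRV_PCM_IOCTL_DRAIN");
 */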
1872
1873 /*
1874 * drop ioctl
1875 *
1876 * Immediately put all linked substreams into SETUP state.
1877 */
1878 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1879 {
1880 struct snd_pcm_runtime *runtime;
1881 int result = 0;
1882
1883 if (PCM_RUNTIME_CHECK(substream))
1884 return -ENXIO;
1885 runtime = substream->runtime;
1886
1887 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1888 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1889 return -EBADFD;
1890
1891 snd_pcm_stream_lock_irq(substream);
1892 /* resume pause */
1893 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1894 snd_pcm_pause(substream, 0);
1895
1896 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1897 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1898 snd_pcm_stream_unlock_irq(substream);
1899
1900 return result;
1901 }
1902
1903
1904 static bool is_pcm_file(struct file *file)
1905 {
1906 struct inode *inode = file_inode(file);
1907 unsigned int minor;
1908
1909 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1910 return false;
1911 minor = iminor(inode);
1912 return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
1913 snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1914 }
1915
1916 /*
1917 * PCM link handling
1918 */
1919 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1920 {
1921 int res = 0;
1922 struct snd_pcm_file *pcm_file;
1923 struct snd_pcm_substream *substream1;
1924 struct snd_pcm_group *group;
1925 struct fd f = fdget(fd);
1926
1927 if (!f.file)
1928 return -EBADFD;
1929 if (!is_pcm_file(f.file)) {
1930 res = -EBADFD;
1931 goto _badf;
1932 }
1933 pcm_file = f.file->private_data;
1934 substream1 = pcm_file->substream;
1935 group = kmalloc(sizeof(*group), GFP_KERNEL);
1936 if (!group) {
1937 res = -ENOMEM;
1938 goto _nolock;
1939 }
1940 down_write_nonblock(&snd_pcm_link_rwsem);
1941 write_lock_irq(&snd_pcm_link_rwlock);
1942 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1943 substream->runtime->status->state != substream1->runtime->status->state ||
1944 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
1945 res = -EBADFD;
1946 goto _end;
1947 }
1948 if (snd_pcm_stream_linked(substream1)) {
1949 res = -EALREADY;
1950 goto _end;
1951 }
1952 if (!snd_pcm_stream_linked(substream)) {
1953 substream->group = group;
1954 group = NULL;
1955 spin_lock_init(&substream->group->lock);
1956 mutex_init(&substream->group->mutex);
1957 INIT_LIST_HEAD(&substream->group->substreams);
1958 list_add_tail(&substream->link_list, &substream->group->substreams);
1959 substream->group->count = 1;
1960 }
1961 list_add_tail(&substream1->link_list, &substream->group->substreams);
1962 substream->group->count++;
1963 substream1->group = substream->group;
1964 _end:
1965 write_unlock_irq(&snd_pcm_link_rwlock);
1966 up_write(&snd_pcm_link_rwsem);
1967 _nolock:
1968 snd_card_unref(substream1->pcm->card);
1969 kfree(group);
1970 _badf:
1971 fdput(f);
1972 return res;
1973 }
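/*
 * Example: a minimal user-space sketch of linking two substreams, assuming
 * "fd_play" and "fd_cap" are PCM file descriptors in the same (non-OPEN)
 * state.  Linked substreams are then started and stopped together:
 *
 *	if (ioctl(fd_play, SNDRV_PCM_IOCTL_LINK, fd_cap) < 0)
 *		perror("SNDRV_PCM_IOCTL_LINK");
 *
 * Linking an already-linked substream fails with EALREADY, and mixing
 * atomic and nonatomic PCMs is rejected with EBADFD, per the checks above.
 */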
1974
1975 static void relink_to_local(struct snd_pcm_substream *substream)
1976 {
1977 substream->group = &substream->self_group;
1978 INIT_LIST_HEAD(&substream->self_group.substreams);
1979 list_add_tail(&substream->link_list, &substream->self_group.substreams);
1980 }
1981
1982 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1983 {
1984 struct snd_pcm_substream *s;
1985 int res = 0;
1986
1987 down_write_nonblock(&snd_pcm_link_rwsem);
1988 write_lock_irq(&snd_pcm_link_rwlock);
1989 if (!snd_pcm_stream_linked(substream)) {
1990 res = -EALREADY;
1991 goto _end;
1992 }
1993 list_del(&substream->link_list);
1994 substream->group->count--;
1995 if (substream->group->count == 1) { /* detach the last stream, too */
1996 snd_pcm_group_for_each_entry(s, substream) {
1997 relink_to_local(s);
1998 break;
1999 }
2000 kfree(substream->group);
2001 }
2002 relink_to_local(substream);
2003 _end:
2004 write_unlock_irq(&snd_pcm_link_rwlock);
2005 up_write(&snd_pcm_link_rwsem);
2006 return res;
2007 }
2008
2009 /*
2010 * hw configurator
2011 */
2012 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2013 struct snd_pcm_hw_rule *rule)
2014 {
2015 struct snd_interval t;
2016 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2017 hw_param_interval_c(params, rule->deps[1]), &t);
2018 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2019 }
2020
2021 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2022 struct snd_pcm_hw_rule *rule)
2023 {
2024 struct snd_interval t;
2025 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2026 hw_param_interval_c(params, rule->deps[1]), &t);
2027 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2028 }
2029
2030 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2031 struct snd_pcm_hw_rule *rule)
2032 {
2033 struct snd_interval t;
2034 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2035 hw_param_interval_c(params, rule->deps[1]),
2036 (unsigned long) rule->private, &t);
2037 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2038 }
2039
2040 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2041 struct snd_pcm_hw_rule *rule)
2042 {
2043 struct snd_interval t;
2044 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2045 (unsigned long) rule->private,
2046 hw_param_interval_c(params, rule->deps[1]), &t);
2047 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2048 }
2049
2050 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2051 struct snd_pcm_hw_rule *rule)
2052 {
2053 unsigned int k;
2054 const struct snd_interval *i =
2055 hw_param_interval_c(params, rule->deps[0]);
2056 struct snd_mask m;
2057 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2058 snd_mask_any(&m);
2059 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2060 int bits;
2061 if (! snd_mask_test(mask, k))
2062 continue;
2063 bits = snd_pcm_format_physical_width(k);
2064 if (bits <= 0)
2065 continue; /* ignore invalid formats */
2066 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2067 snd_mask_reset(&m, k);
2068 }
2069 return snd_mask_refine(mask, &m);
2070 }
2071
2072 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2073 struct snd_pcm_hw_rule *rule)
2074 {
2075 struct snd_interval t;
2076 unsigned int k;
2077 t.min = UINT_MAX;
2078 t.max = 0;
2079 t.openmin = 0;
2080 t.openmax = 0;
2081 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2082 int bits;
2083 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2084 continue;
2085 bits = snd_pcm_format_physical_width(k);
2086 if (bits <= 0)
2087 continue; /* ignore invalid formats */
2088 if (t.min > (unsigned)bits)
2089 t.min = bits;
2090 if (t.max < (unsigned)bits)
2091 t.max = bits;
2092 }
2093 t.integer = 1;
2094 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2095 }
2096
2097 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2098 #error "Change this table"
2099 #endif
2100
2101 static const unsigned int rates[] = {
2102 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2103 48000, 64000, 88200, 96000, 176400, 192000
2104 };
2105
2106 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2107 .count = ARRAY_SIZE(rates),
2108 .list = rates,
2109 };
2110
2111 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2112 struct snd_pcm_hw_rule *rule)
2113 {
2114 struct snd_pcm_hardware *hw = rule->private;
2115 return snd_interval_list(hw_param_interval(params, rule->var),
2116 snd_pcm_known_rates.count,
2117 snd_pcm_known_rates.list, hw->rates);
2118 }
2119
2120 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2121 struct snd_pcm_hw_rule *rule)
2122 {
2123 struct snd_interval t;
2124 struct snd_pcm_substream *substream = rule->private;
2125 t.min = 0;
2126 t.max = substream->buffer_bytes_max;
2127 t.openmin = 0;
2128 t.openmax = 0;
2129 t.integer = 1;
2130 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2131 }
2132
2133 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2134 {
2135 struct snd_pcm_runtime *runtime = substream->runtime;
2136 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2137 int k, err;
2138
2139 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2140 snd_mask_any(constrs_mask(constrs, k));
2141 }
2142
2143 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2144 snd_interval_any(constrs_interval(constrs, k));
2145 }
2146
2147 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2148 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2149 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2150 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2151 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2152
2153 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2154 snd_pcm_hw_rule_format, NULL,
2155 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2156 if (err < 0)
2157 return err;
2158 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2159 snd_pcm_hw_rule_sample_bits, NULL,
2160 SNDRV_PCM_HW_PARAM_FORMAT,
2161 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2162 if (err < 0)
2163 return err;
2164 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2165 snd_pcm_hw_rule_div, NULL,
2166 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2167 if (err < 0)
2168 return err;
2169 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2170 snd_pcm_hw_rule_mul, NULL,
2171 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2172 if (err < 0)
2173 return err;
2174 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2175 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2176 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2177 if (err < 0)
2178 return err;
2179 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2180 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2181 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2182 if (err < 0)
2183 return err;
2184 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2185 snd_pcm_hw_rule_div, NULL,
2186 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2187 if (err < 0)
2188 return err;
2189 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2190 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2191 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2192 if (err < 0)
2193 return err;
2194 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2195 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2196 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2197 if (err < 0)
2198 return err;
2199 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2200 snd_pcm_hw_rule_div, NULL,
2201 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2202 if (err < 0)
2203 return err;
2204 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2205 snd_pcm_hw_rule_div, NULL,
2206 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2207 if (err < 0)
2208 return err;
2209 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2210 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2211 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2212 if (err < 0)
2213 return err;
2214 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2215 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2216 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2217 if (err < 0)
2218 return err;
2219 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2220 snd_pcm_hw_rule_mul, NULL,
2221 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2222 if (err < 0)
2223 return err;
2224 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2225 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2226 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2227 if (err < 0)
2228 return err;
2229 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2230 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2231 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2232 if (err < 0)
2233 return err;
2234 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2235 snd_pcm_hw_rule_muldivk, (void*) 8,
2236 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2237 if (err < 0)
2238 return err;
2239 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2240 snd_pcm_hw_rule_muldivk, (void*) 8,
2241 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2242 if (err < 0)
2243 return err;
2244 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2245 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2246 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2247 if (err < 0)
2248 return err;
2249 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2250 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2251 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2252 if (err < 0)
2253 return err;
2254 return 0;
2255 }
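/*
 * Worked example of the generic rule graph above (illustrative numbers):
 * with format S16_LE (16 physical bits) and 2 channels,
 *	frame_bits   = sample_bits * channels       = 16 * 2 = 32
 * and with period_size = 1024 frames at rate = 48000 Hz,
 *	period_bytes = period_size * frame_bits / 8 = 1024 * 32 / 8 = 4096
 *	period_time  = period_size * 1000000 / rate = 1024 * 1000000 / 48000
 *	             ~= 21333 us
 * With periods = 4, buffer_size = 4096 frames and buffer_bytes = 16384.
 * snd_pcm_hw_refine() keeps applying these rules until the parameter
 * intervals stop changing.
 */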
2256
2257 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2258 {
2259 struct snd_pcm_runtime *runtime = substream->runtime;
2260 struct snd_pcm_hardware *hw = &runtime->hw;
2261 int err;
2262 unsigned int mask = 0;
2263
2264 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2265 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2266 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2267 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2268 if (hw_support_mmap(substream)) {
2269 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2270 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2271 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2272 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2273 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2274 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2275 }
2276 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2277 if (err < 0)
2278 return err;
2279
2280 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2281 if (err < 0)
2282 return err;
2283
2284 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2285 if (err < 0)
2286 return err;
2287
2288 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2289 hw->channels_min, hw->channels_max);
2290 if (err < 0)
2291 return err;
2292
2293 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2294 hw->rate_min, hw->rate_max);
2295 if (err < 0)
2296 return err;
2297
2298 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2299 hw->period_bytes_min, hw->period_bytes_max);
2300 if (err < 0)
2301 return err;
2302
2303 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2304 hw->periods_min, hw->periods_max);
2305 if (err < 0)
2306 return err;
2307
2308 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2309 hw->period_bytes_min, hw->buffer_bytes_max);
2310 if (err < 0)
2311 return err;
2312
2313 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2314 snd_pcm_hw_rule_buffer_bytes_max, substream,
2315 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2316 if (err < 0)
2317 return err;
2318
2319 /* FIXME: remove */
2320 if (runtime->dma_bytes) {
2321 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2322 if (err < 0)
2323 return err;
2324 }
2325
2326 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2327 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2328 snd_pcm_hw_rule_rate, hw,
2329 SNDRV_PCM_HW_PARAM_RATE, -1);
2330 if (err < 0)
2331 return err;
2332 }
2333
2334 /* FIXME: this belongs to the lowlevel driver */
2335 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2336
2337 return 0;
2338 }
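/*
 * Example: a hedged sketch of the driver side that feeds this function.
 * The fields of runtime->hw (usually copied from a static snd_pcm_hardware
 * in the driver's .open callback) are exactly what is converted into
 * constraints above.  Names prefixed with "mydrv_" are hypothetical:
 *
 *	static const struct snd_pcm_hardware mydrv_pcm_hw = {
 *		.info             = SNDRV_PCM_INFO_MMAP |
 *				    SNDRV_PCM_INFO_INTERLEAVED |
 *				    SNDRV_PCM_INFO_BLOCK_TRANSFER,
 *		.formats          = SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates            = SNDRV_PCM_RATE_48000,
 *		.rate_min         = 48000,
 *		.rate_max         = 48000,
 *		.channels_min     = 2,
 *		.channels_max     = 2,
 *		.buffer_bytes_max = 64 * 1024,
 *		.period_bytes_min = 4096,
 *		.period_bytes_max = 16 * 1024,
 *		.periods_min      = 2,
 *		.periods_max      = 16,
 *	};
 *
 * and in mydrv_pcm_open():  substream->runtime->hw = mydrv_pcm_hw;
 */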
2339
2340 static void pcm_release_private(struct snd_pcm_substream *substream)
2341 {
2342 snd_pcm_unlink(substream);
2343 }
2344
2345 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2346 {
2347 substream->ref_count--;
2348 if (substream->ref_count > 0)
2349 return;
2350
2351 snd_pcm_drop(substream);
2352 if (substream->hw_opened) {
2353 if (substream->ops->hw_free &&
2354 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2355 substream->ops->hw_free(substream);
2356 substream->ops->close(substream);
2357 substream->hw_opened = 0;
2358 }
2359 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2360 pm_qos_remove_request(&substream->latency_pm_qos_req);
2361 if (substream->pcm_release) {
2362 substream->pcm_release(substream);
2363 substream->pcm_release = NULL;
2364 }
2365 snd_pcm_detach_substream(substream);
2366 }
2367 EXPORT_SYMBOL(snd_pcm_release_substream);
2368
2369 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2370 struct file *file,
2371 struct snd_pcm_substream **rsubstream)
2372 {
2373 struct snd_pcm_substream *substream;
2374 int err;
2375
2376 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2377 if (err < 0)
2378 return err;
2379 if (substream->ref_count > 1) {
2380 *rsubstream = substream;
2381 return 0;
2382 }
2383
2384 err = snd_pcm_hw_constraints_init(substream);
2385 if (err < 0) {
2386 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2387 goto error;
2388 }
2389
2390 if ((err = substream->ops->open(substream)) < 0)
2391 goto error;
2392
2393 substream->hw_opened = 1;
2394
2395 err = snd_pcm_hw_constraints_complete(substream);
2396 if (err < 0) {
2397 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2398 goto error;
2399 }
2400
2401 *rsubstream = substream;
2402 return 0;
2403
2404 error:
2405 snd_pcm_release_substream(substream);
2406 return err;
2407 }
2408 EXPORT_SYMBOL(snd_pcm_open_substream);
2409
2410 static int snd_pcm_open_file(struct file *file,
2411 struct snd_pcm *pcm,
2412 int stream)
2413 {
2414 struct snd_pcm_file *pcm_file;
2415 struct snd_pcm_substream *substream;
2416 int err;
2417
2418 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2419 if (err < 0)
2420 return err;
2421
2422 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2423 if (pcm_file == NULL) {
2424 snd_pcm_release_substream(substream);
2425 return -ENOMEM;
2426 }
2427 pcm_file->substream = substream;
2428 if (substream->ref_count == 1) {
2429 substream->file = pcm_file;
2430 substream->pcm_release = pcm_release_private;
2431 }
2432 file->private_data = pcm_file;
2433
2434 return 0;
2435 }
2436
2437 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2438 {
2439 struct snd_pcm *pcm;
2440 int err = nonseekable_open(inode, file);
2441 if (err < 0)
2442 return err;
2443 pcm = snd_lookup_minor_data(iminor(inode),
2444 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2445 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2446 if (pcm)
2447 snd_card_unref(pcm->card);
2448 return err;
2449 }
2450
2451 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2452 {
2453 struct snd_pcm *pcm;
2454 int err = nonseekable_open(inode, file);
2455 if (err < 0)
2456 return err;
2457 pcm = snd_lookup_minor_data(iminor(inode),
2458 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2459 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2460 if (pcm)
2461 snd_card_unref(pcm->card);
2462 return err;
2463 }
2464
2465 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2466 {
2467 int err;
2468 wait_queue_entry_t wait;
2469
2470 if (pcm == NULL) {
2471 err = -ENODEV;
2472 goto __error1;
2473 }
2474 err = snd_card_file_add(pcm->card, file);
2475 if (err < 0)
2476 goto __error1;
2477 if (!try_module_get(pcm->card->module)) {
2478 err = -EFAULT;
2479 goto __error2;
2480 }
2481 init_waitqueue_entry(&wait, current);
2482 add_wait_queue(&pcm->open_wait, &wait);
2483 mutex_lock(&pcm->open_mutex);
2484 while (1) {
2485 err = snd_pcm_open_file(file, pcm, stream);
2486 if (err >= 0)
2487 break;
2488 if (err == -EAGAIN) {
2489 if (file->f_flags & O_NONBLOCK) {
2490 err = -EBUSY;
2491 break;
2492 }
2493 } else
2494 break;
2495 set_current_state(TASK_INTERRUPTIBLE);
2496 mutex_unlock(&pcm->open_mutex);
2497 schedule();
2498 mutex_lock(&pcm->open_mutex);
2499 if (pcm->card->shutdown) {
2500 err = -ENODEV;
2501 break;
2502 }
2503 if (signal_pending(current)) {
2504 err = -ERESTARTSYS;
2505 break;
2506 }
2507 }
2508 remove_wait_queue(&pcm->open_wait, &wait);
2509 mutex_unlock(&pcm->open_mutex);
2510 if (err < 0)
2511 goto __error;
2512 return err;
2513
2514 __error:
2515 module_put(pcm->card->module);
2516 __error2:
2517 snd_card_file_remove(pcm->card, file);
2518 __error1:
2519 return err;
2520 }
2521
2522 static int snd_pcm_release(struct inode *inode, struct file *file)
2523 {
2524 struct snd_pcm *pcm;
2525 struct snd_pcm_substream *substream;
2526 struct snd_pcm_file *pcm_file;
2527
2528 pcm_file = file->private_data;
2529 substream = pcm_file->substream;
2530 if (snd_BUG_ON(!substream))
2531 return -ENXIO;
2532 pcm = substream->pcm;
2533 mutex_lock(&pcm->open_mutex);
2534 snd_pcm_release_substream(substream);
2535 kfree(pcm_file);
2536 mutex_unlock(&pcm->open_mutex);
2537 wake_up(&pcm->open_wait);
2538 module_put(pcm->card->module);
2539 snd_card_file_remove(pcm->card, file);
2540 return 0;
2541 }
2542
2543 /* check and update PCM state; return 0 or a negative error.
2544 * Call this inside the PCM stream lock.
2545 */
2546 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2547 {
2548 switch (substream->runtime->status->state) {
2549 case SNDRV_PCM_STATE_DRAINING:
2550 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2551 return -EBADFD;
2552 /* Fall through */
2553 case SNDRV_PCM_STATE_RUNNING:
2554 return snd_pcm_update_hw_ptr(substream);
2555 case SNDRV_PCM_STATE_PREPARED:
2556 case SNDRV_PCM_STATE_PAUSED:
2557 return 0;
2558 case SNDRV_PCM_STATE_SUSPENDED:
2559 return -ESTRPIPE;
2560 case SNDRV_PCM_STATE_XRUN:
2561 return -EPIPE;
2562 default:
2563 return -EBADFD;
2564 }
2565 }
2566
2567 /* increase the appl_ptr; returns the processed frames or a negative error */
2568 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2569 snd_pcm_uframes_t frames,
2570 snd_pcm_sframes_t avail)
2571 {
2572 struct snd_pcm_runtime *runtime = substream->runtime;
2573 snd_pcm_sframes_t appl_ptr;
2574 int ret;
2575
2576 if (avail <= 0)
2577 return 0;
2578 if (frames > (snd_pcm_uframes_t)avail)
2579 frames = avail;
2580 appl_ptr = runtime->control->appl_ptr + frames;
2581 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2582 appl_ptr -= runtime->boundary;
2583 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2584 return ret < 0 ? ret : frames;
2585 }
2586
2587 /* decrease the appl_ptr; returns the processed frames or a negative error */
2588 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2589 snd_pcm_uframes_t frames,
2590 snd_pcm_sframes_t avail)
2591 {
2592 struct snd_pcm_runtime *runtime = substream->runtime;
2593 snd_pcm_sframes_t appl_ptr;
2594 int ret;
2595
2596 if (avail <= 0)
2597 return 0;
2598 if (frames > (snd_pcm_uframes_t)avail)
2599 frames = avail;
2600 appl_ptr = runtime->control->appl_ptr - frames;
2601 if (appl_ptr < 0)
2602 appl_ptr += runtime->boundary;
2603 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2604 return ret < 0 ? ret : frames;
2605 }
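/*
 * Both helpers wrap appl_ptr at runtime->boundary (a large multiple of the
 * buffer size).  Illustrative arithmetic, assuming boundary = 0x40000000:
 * forwarding appl_ptr = 0x3fffff00 by 0x200 frames gives
 * 0x40000100 - 0x40000000 = 0x100, while rewinding appl_ptr = 0x80 by
 * 0x100 frames gives 0x80 - 0x100 + 0x40000000 = 0x3fffff80.
 */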
2606
2607 static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
2608 snd_pcm_uframes_t frames)
2609 {
2610 struct snd_pcm_runtime *runtime = substream->runtime;
2611 snd_pcm_sframes_t ret;
2612
2613 if (frames == 0)
2614 return 0;
2615
2616 snd_pcm_stream_lock_irq(substream);
2617 ret = do_pcm_hwsync(substream);
2618 if (!ret)
2619 ret = rewind_appl_ptr(substream, frames,
2620 snd_pcm_playback_hw_avail(runtime));
2621 snd_pcm_stream_unlock_irq(substream);
2622 return ret;
2623 }
2624
2625 static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
2626 snd_pcm_uframes_t frames)
2627 {
2628 struct snd_pcm_runtime *runtime = substream->runtime;
2629 snd_pcm_sframes_t ret;
2630
2631 if (frames == 0)
2632 return 0;
2633
2634 snd_pcm_stream_lock_irq(substream);
2635 ret = do_pcm_hwsync(substream);
2636 if (!ret)
2637 ret = rewind_appl_ptr(substream, frames,
2638 snd_pcm_capture_hw_avail(runtime));
2639 snd_pcm_stream_unlock_irq(substream);
2640 return ret;
2641 }
2642
2643 static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
2644 snd_pcm_uframes_t frames)
2645 {
2646 struct snd_pcm_runtime *runtime = substream->runtime;
2647 snd_pcm_sframes_t ret;
2648
2649 if (frames == 0)
2650 return 0;
2651
2652 snd_pcm_stream_lock_irq(substream);
2653 ret = do_pcm_hwsync(substream);
2654 if (!ret)
2655 ret = forward_appl_ptr(substream, frames,
2656 snd_pcm_playback_avail(runtime));
2657 snd_pcm_stream_unlock_irq(substream);
2658 return ret;
2659 }
2660
2661 static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream,
2662 snd_pcm_uframes_t frames)
2663 {
2664 struct snd_pcm_runtime *runtime = substream->runtime;
2665 snd_pcm_sframes_t ret;
2666
2667 if (frames == 0)
2668 return 0;
2669
2670 snd_pcm_stream_lock_irq(substream);
2671 ret = do_pcm_hwsync(substream);
2672 if (!ret)
2673 ret = forward_appl_ptr(substream, frames,
2674 snd_pcm_capture_avail(runtime));
2675 snd_pcm_stream_unlock_irq(substream);
2676 return ret;
2677 }
2678
2679 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2680 {
2681 int err;
2682
2683 snd_pcm_stream_lock_irq(substream);
2684 err = do_pcm_hwsync(substream);
2685 snd_pcm_stream_unlock_irq(substream);
2686 return err;
2687 }
2688
2689 static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
2690 {
2691 struct snd_pcm_runtime *runtime = substream->runtime;
2692 int err;
2693 snd_pcm_sframes_t n = 0;
2694
2695 snd_pcm_stream_lock_irq(substream);
2696 err = do_pcm_hwsync(substream);
2697 if (!err) {
2698 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2699 n = snd_pcm_playback_hw_avail(runtime);
2700 else
2701 n = snd_pcm_capture_avail(runtime);
2702 n += runtime->delay;
2703 }
2704 snd_pcm_stream_unlock_irq(substream);
2705 return err < 0 ? err : n;
2706 }
2707
2708 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2709 struct snd_pcm_sync_ptr __user *_sync_ptr)
2710 {
2711 struct snd_pcm_runtime *runtime = substream->runtime;
2712 struct snd_pcm_sync_ptr sync_ptr;
2713 volatile struct snd_pcm_mmap_status *status;
2714 volatile struct snd_pcm_mmap_control *control;
2715 int err;
2716
2717 memset(&sync_ptr, 0, sizeof(sync_ptr));
2718 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2719 return -EFAULT;
2720 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2721 return -EFAULT;
2722 status = runtime->status;
2723 control = runtime->control;
2724 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2725 err = snd_pcm_hwsync(substream);
2726 if (err < 0)
2727 return err;
2728 }
2729 snd_pcm_stream_lock_irq(substream);
2730 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2731 err = pcm_lib_apply_appl_ptr(substream,
2732 sync_ptr.c.control.appl_ptr);
2733 if (err < 0) {
2734 snd_pcm_stream_unlock_irq(substream);
2735 return err;
2736 }
2737 } else {
2738 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2739 }
2740 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2741 control->avail_min = sync_ptr.c.control.avail_min;
2742 else
2743 sync_ptr.c.control.avail_min = control->avail_min;
2744 sync_ptr.s.status.state = status->state;
2745 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2746 sync_ptr.s.status.tstamp = status->tstamp;
2747 sync_ptr.s.status.suspended_state = status->suspended_state;
2748 snd_pcm_stream_unlock_irq(substream);
2749 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2750 return -EFAULT;
2751 return 0;
2752 }
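/*
 * Example: a minimal user-space sketch of the SYNC_PTR protocol, assuming
 * "fd" is a configured PCM file descriptor.  Setting the APPL and AVAIL_MIN
 * bits means "do not apply my values, just report yours", so the call below
 * is a pure status query preceded by a hardware-pointer update:
 *
 *	struct snd_pcm_sync_ptr sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC |
 *		   SNDRV_PCM_SYNC_PTR_APPL |
 *		   SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
 *	if (ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp) == 0)
 *		printf("hw_ptr=%lu appl_ptr=%lu state=%d\n",
 *		       (unsigned long)sp.s.status.hw_ptr,
 *		       (unsigned long)sp.c.control.appl_ptr,
 *		       (int)sp.s.status.state);
 *
 * Clearing SNDRV_PCM_SYNC_PTR_APPL instead pushes sp.c.control.appl_ptr into
 * the kernel via pcm_lib_apply_appl_ptr(), as done in the code above.
 */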
2753
2754 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2755 {
2756 struct snd_pcm_runtime *runtime = substream->runtime;
2757 int arg;
2758
2759 if (get_user(arg, _arg))
2760 return -EFAULT;
2761 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2762 return -EINVAL;
2763 runtime->tstamp_type = arg;
2764 return 0;
2765 }
2766
2767 static int snd_pcm_common_ioctl(struct file *file,
2768 struct snd_pcm_substream *substream,
2769 unsigned int cmd, void __user *arg)
2770 {
2771 struct snd_pcm_file *pcm_file = file->private_data;
2772
2773 switch (cmd) {
2774 case SNDRV_PCM_IOCTL_PVERSION:
2775 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2776 case SNDRV_PCM_IOCTL_INFO:
2777 return snd_pcm_info_user(substream, arg);
2778 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2779 return 0;
2780 case SNDRV_PCM_IOCTL_TTSTAMP:
2781 return snd_pcm_tstamp(substream, arg);
2782 case SNDRV_PCM_IOCTL_USER_PVERSION:
2783 if (get_user(pcm_file->user_pversion,
2784 (unsigned int __user *)arg))
2785 return -EFAULT;
2786 return 0;
2787 case SNDRV_PCM_IOCTL_HW_REFINE:
2788 return snd_pcm_hw_refine_user(substream, arg);
2789 case SNDRV_PCM_IOCTL_HW_PARAMS:
2790 return snd_pcm_hw_params_user(substream, arg);
2791 case SNDRV_PCM_IOCTL_HW_FREE:
2792 return snd_pcm_hw_free(substream);
2793 case SNDRV_PCM_IOCTL_SW_PARAMS:
2794 return snd_pcm_sw_params_user(substream, arg);
2795 case SNDRV_PCM_IOCTL_STATUS:
2796 return snd_pcm_status_user(substream, arg, false);
2797 case SNDRV_PCM_IOCTL_STATUS_EXT:
2798 return snd_pcm_status_user(substream, arg, true);
2799 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2800 return snd_pcm_channel_info_user(substream, arg);
2801 case SNDRV_PCM_IOCTL_PREPARE:
2802 return snd_pcm_prepare(substream, file);
2803 case SNDRV_PCM_IOCTL_RESET:
2804 return snd_pcm_reset(substream);
2805 case SNDRV_PCM_IOCTL_START:
2806 return snd_pcm_start_lock_irq(substream);
2807 case SNDRV_PCM_IOCTL_LINK:
2808 return snd_pcm_link(substream, (int)(unsigned long) arg);
2809 case SNDRV_PCM_IOCTL_UNLINK:
2810 return snd_pcm_unlink(substream);
2811 case SNDRV_PCM_IOCTL_RESUME:
2812 return snd_pcm_resume(substream);
2813 case SNDRV_PCM_IOCTL_XRUN:
2814 return snd_pcm_xrun(substream);
2815 case SNDRV_PCM_IOCTL_HWSYNC:
2816 return snd_pcm_hwsync(substream);
2817 case SNDRV_PCM_IOCTL_DELAY:
2818 {
2819 snd_pcm_sframes_t delay = snd_pcm_delay(substream);
2820 snd_pcm_sframes_t __user *res = arg;
2821
2822 if (delay < 0)
2823 return delay;
2824 if (put_user(delay, res))
2825 return -EFAULT;
2826 return 0;
2827 }
2828 case SNDRV_PCM_IOCTL_SYNC_PTR:
2829 return snd_pcm_sync_ptr(substream, arg);
2830 #ifdef CONFIG_SND_SUPPORT_OLD_API
2831 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2832 return snd_pcm_hw_refine_old_user(substream, arg);
2833 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2834 return snd_pcm_hw_params_old_user(substream, arg);
2835 #endif
2836 case SNDRV_PCM_IOCTL_DRAIN:
2837 return snd_pcm_drain(substream, file);
2838 case SNDRV_PCM_IOCTL_DROP:
2839 return snd_pcm_drop(substream);
2840 case SNDRV_PCM_IOCTL_PAUSE:
2841 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2842 substream,
2843 (int)(unsigned long)arg);
2844 }
2845 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2846 return -ENOTTY;
2847 }
2848
2849 static int snd_pcm_common_ioctl1(struct file *file,
2850 struct snd_pcm_substream *substream,
2851 unsigned int cmd, void __user *arg)
2852 {
2853 struct snd_card *card = substream->pcm->card;
2854 int res;
2855
2856 snd_power_lock(card);
2857 res = snd_power_wait(card, SNDRV_CTL_POWER_D0);
2858 if (res >= 0)
2859 res = snd_pcm_common_ioctl(file, substream, cmd, arg);
2860 snd_power_unlock(card);
2861 return res;
2862 }
2863
2864 static int snd_pcm_playback_ioctl1(struct file *file,
2865 struct snd_pcm_substream *substream,
2866 unsigned int cmd, void __user *arg)
2867 {
2868 if (PCM_RUNTIME_CHECK(substream))
2869 return -ENXIO;
2870 if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
2871 return -EINVAL;
2872 switch (cmd) {
2873 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2874 {
2875 struct snd_xferi xferi;
2876 struct snd_xferi __user *_xferi = arg;
2877 struct snd_pcm_runtime *runtime = substream->runtime;
2878 snd_pcm_sframes_t result;
2879 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2880 return -EBADFD;
2881 if (put_user(0, &_xferi->result))
2882 return -EFAULT;
2883 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2884 return -EFAULT;
2885 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2886 __put_user(result, &_xferi->result);
2887 return result < 0 ? result : 0;
2888 }
2889 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2890 {
2891 struct snd_xfern xfern;
2892 struct snd_xfern __user *_xfern = arg;
2893 struct snd_pcm_runtime *runtime = substream->runtime;
2894 void __user **bufs;
2895 snd_pcm_sframes_t result;
2896 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2897 return -EBADFD;
2898 if (runtime->channels > 128)
2899 return -EINVAL;
2900 if (put_user(0, &_xfern->result))
2901 return -EFAULT;
2902 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2903 return -EFAULT;
2904
2905 bufs = memdup_user(xfern.bufs,
2906 sizeof(void *) * runtime->channels);
2907 if (IS_ERR(bufs))
2908 return PTR_ERR(bufs);
2909 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2910 kfree(bufs);
2911 __put_user(result, &_xfern->result);
2912 return result < 0 ? result : 0;
2913 }
2914 case SNDRV_PCM_IOCTL_REWIND:
2915 {
2916 snd_pcm_uframes_t frames;
2917 snd_pcm_uframes_t __user *_frames = arg;
2918 snd_pcm_sframes_t result;
2919 if (get_user(frames, _frames))
2920 return -EFAULT;
2921 if (put_user(0, _frames))
2922 return -EFAULT;
2923 result = snd_pcm_playback_rewind(substream, frames);
2924 __put_user(result, _frames);
2925 return result < 0 ? result : 0;
2926 }
2927 case SNDRV_PCM_IOCTL_FORWARD:
2928 {
2929 snd_pcm_uframes_t frames;
2930 snd_pcm_uframes_t __user *_frames = arg;
2931 snd_pcm_sframes_t result;
2932 if (get_user(frames, _frames))
2933 return -EFAULT;
2934 if (put_user(0, _frames))
2935 return -EFAULT;
2936 result = snd_pcm_playback_forward(substream, frames);
2937 __put_user(result, _frames);
2938 return result < 0 ? result : 0;
2939 }
2940 }
2941 return snd_pcm_common_ioctl1(file, substream, cmd, arg);
2942 }
2943
2944 static int snd_pcm_capture_ioctl1(struct file *file,
2945 struct snd_pcm_substream *substream,
2946 unsigned int cmd, void __user *arg)
2947 {
2948 if (PCM_RUNTIME_CHECK(substream))
2949 return -ENXIO;
2950 if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
2951 return -EINVAL;
2952 switch (cmd) {
2953 case SNDRV_PCM_IOCTL_READI_FRAMES:
2954 {
2955 struct snd_xferi xferi;
2956 struct snd_xferi __user *_xferi = arg;
2957 struct snd_pcm_runtime *runtime = substream->runtime;
2958 snd_pcm_sframes_t result;
2959 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2960 return -EBADFD;
2961 if (put_user(0, &_xferi->result))
2962 return -EFAULT;
2963 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2964 return -EFAULT;
2965 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2966 __put_user(result, &_xferi->result);
2967 return result < 0 ? result : 0;
2968 }
2969 case SNDRV_PCM_IOCTL_READN_FRAMES:
2970 {
2971 struct snd_xfern xfern;
2972 struct snd_xfern __user *_xfern = arg;
2973 struct snd_pcm_runtime *runtime = substream->runtime;
2974 void *bufs;
2975 snd_pcm_sframes_t result;
2976 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2977 return -EBADFD;
2978 if (runtime->channels > 128)
2979 return -EINVAL;
2980 if (put_user(0, &_xfern->result))
2981 return -EFAULT;
2982 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2983 return -EFAULT;
2984
2985 bufs = memdup_user(xfern.bufs,
2986 sizeof(void *) * runtime->channels);
2987 if (IS_ERR(bufs))
2988 return PTR_ERR(bufs);
2989 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2990 kfree(bufs);
2991 __put_user(result, &_xfern->result);
2992 return result < 0 ? result : 0;
2993 }
2994 case SNDRV_PCM_IOCTL_REWIND:
2995 {
2996 snd_pcm_uframes_t frames;
2997 snd_pcm_uframes_t __user *_frames = arg;
2998 snd_pcm_sframes_t result;
2999 if (get_user(frames, _frames))
3000 return -EFAULT;
3001 if (put_user(0, _frames))
3002 return -EFAULT;
3003 result = snd_pcm_capture_rewind(substream, frames);
3004 __put_user(result, _frames);
3005 return result < 0 ? result : 0;
3006 }
3007 case SNDRV_PCM_IOCTL_FORWARD:
3008 {
3009 snd_pcm_uframes_t frames;
3010 snd_pcm_uframes_t __user *_frames = arg;
3011 snd_pcm_sframes_t result;
3012 if (get_user(frames, _frames))
3013 return -EFAULT;
3014 if (put_user(0, _frames))
3015 return -EFAULT;
3016 result = snd_pcm_capture_forward(substream, frames);
3017 __put_user(result, _frames);
3018 return result < 0 ? result : 0;
3019 }
3020 }
3021 return snd_pcm_common_ioctl1(file, substream, cmd, arg);
3022 }
3023
3024 static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd,
3025 unsigned long arg)
3026 {
3027 struct snd_pcm_file *pcm_file;
3028
3029 pcm_file = file->private_data;
3030
3031 if (((cmd >> 8) & 0xff) != 'A')
3032 return -ENOTTY;
3033
3034 return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
3035 (void __user *)arg);
3036 }
3037
3038 static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
3039 unsigned long arg)
3040 {
3041 struct snd_pcm_file *pcm_file;
3042
3043 pcm_file = file->private_data;
3044
3045 if (((cmd >> 8) & 0xff) != 'A')
3046 return -ENOTTY;
3047
3048 return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
3049 (void __user *)arg);
3050 }
3051
3052 /**
3053 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3054 * @substream: PCM substream
3055 * @cmd: IOCTL cmd
3056 * @arg: IOCTL argument
3057 *
3058 * This function is provided primarily for the OSS layer and USB gadget drivers,
3059 * and it allows only a limited set of ioctls (hw_params, sw_params,
3060 * prepare, start, drain, drop, forward, delay).
3061 */
3062 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3063 unsigned int cmd, void *arg)
3064 {
3065 snd_pcm_uframes_t *frames = arg;
3066 snd_pcm_sframes_t result;
3067 int err;
3068
3069 switch (cmd) {
3070 case SNDRV_PCM_IOCTL_FORWARD:
3071 {
3072 /* provided only for OSS; capture-only and no value returned */
3073 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3074 return -EINVAL;
3075 result = snd_pcm_capture_forward(substream, *frames);
3076 return result < 0 ? result : 0;
3077 }
3078 case SNDRV_PCM_IOCTL_HW_PARAMS:
3079 return snd_pcm_hw_params(substream, arg);
3080 case SNDRV_PCM_IOCTL_SW_PARAMS:
3081 return snd_pcm_sw_params(substream, arg);
3082 case SNDRV_PCM_IOCTL_PREPARE:
3083 return snd_pcm_prepare(substream, NULL);
3084 case SNDRV_PCM_IOCTL_START:
3085 return snd_pcm_start_lock_irq(substream);
3086 case SNDRV_PCM_IOCTL_DRAIN:
3087 snd_power_lock(substream->pcm->card);
3088 err = snd_pcm_drain(substream, NULL);
3089 snd_power_unlock(substream->pcm->card);
3090 return err;
3091 case SNDRV_PCM_IOCTL_DROP:
3092 return snd_pcm_drop(substream);
3093 case SNDRV_PCM_IOCTL_DELAY:
3094 {
3095 result = snd_pcm_delay(substream);
3096 if (result < 0)
3097 return result;
3098 *frames = result;
3099 return 0;
3100 }
3101 default:
3102 return -EINVAL;
3103 }
3104 }
3105 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
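/*
 * Example: a hedged kernel-side sketch of using the export above, e.g. from
 * an OSS-emulation-style caller that already holds a valid substream:
 *
 *	snd_pcm_uframes_t delay;
 *	int err;
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY,
 *					   &delay);
 *
 * Unlike the user-space path, "arg" is a plain kernel pointer here, and any
 * command outside the small set handled above returns -EINVAL.
 */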
3106
3107 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3108 loff_t * offset)
3109 {
3110 struct snd_pcm_file *pcm_file;
3111 struct snd_pcm_substream *substream;
3112 struct snd_pcm_runtime *runtime;
3113 snd_pcm_sframes_t result;
3114
3115 pcm_file = file->private_data;
3116 substream = pcm_file->substream;
3117 if (PCM_RUNTIME_CHECK(substream))
3118 return -ENXIO;
3119 runtime = substream->runtime;
3120 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3121 return -EBADFD;
3122 if (!frame_aligned(runtime, count))
3123 return -EINVAL;
3124 count = bytes_to_frames(runtime, count);
3125 result = snd_pcm_lib_read(substream, buf, count);
3126 if (result > 0)
3127 result = frames_to_bytes(runtime, result);
3128 return result;
3129 }
3130
3131 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3132 size_t count, loff_t * offset)
3133 {
3134 struct snd_pcm_file *pcm_file;
3135 struct snd_pcm_substream *substream;
3136 struct snd_pcm_runtime *runtime;
3137 snd_pcm_sframes_t result;
3138
3139 pcm_file = file->private_data;
3140 substream = pcm_file->substream;
3141 if (PCM_RUNTIME_CHECK(substream))
3142 return -ENXIO;
3143 runtime = substream->runtime;
3144 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3145 return -EBADFD;
3146 if (!frame_aligned(runtime, count))
3147 return -EINVAL;
3148 count = bytes_to_frames(runtime, count);
3149 result = snd_pcm_lib_write(substream, buf, count);
3150 if (result > 0)
3151 result = frames_to_bytes(runtime, result);
3152 return result;
3153 }
3154
3155 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3156 {
3157 struct snd_pcm_file *pcm_file;
3158 struct snd_pcm_substream *substream;
3159 struct snd_pcm_runtime *runtime;
3160 snd_pcm_sframes_t result;
3161 unsigned long i;
3162 void __user **bufs;
3163 snd_pcm_uframes_t frames;
3164
3165 pcm_file = iocb->ki_filp->private_data;
3166 substream = pcm_file->substream;
3167 if (PCM_RUNTIME_CHECK(substream))
3168 return -ENXIO;
3169 runtime = substream->runtime;
3170 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3171 return -EBADFD;
3172 if (!iter_is_iovec(to))
3173 return -EINVAL;
3174 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3175 return -EINVAL;
3176 if (!frame_aligned(runtime, to->iov->iov_len))
3177 return -EINVAL;
3178 frames = bytes_to_samples(runtime, to->iov->iov_len);
3179 bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
3180 if (bufs == NULL)
3181 return -ENOMEM;
3182 for (i = 0; i < to->nr_segs; ++i)
3183 bufs[i] = to->iov[i].iov_base;
3184 result = snd_pcm_lib_readv(substream, bufs, frames);
3185 if (result > 0)
3186 result = frames_to_bytes(runtime, result);
3187 kfree(bufs);
3188 return result;
3189 }
3190
3191 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3192 {
3193 struct snd_pcm_file *pcm_file;
3194 struct snd_pcm_substream *substream;
3195 struct snd_pcm_runtime *runtime;
3196 snd_pcm_sframes_t result;
3197 unsigned long i;
3198 void __user **bufs;
3199 snd_pcm_uframes_t frames;
3200
3201 pcm_file = iocb->ki_filp->private_data;
3202 substream = pcm_file->substream;
3203 if (PCM_RUNTIME_CHECK(substream))
3204 return -ENXIO;
3205 runtime = substream->runtime;
3206 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3207 return -EBADFD;
3208 if (!iter_is_iovec(from))
3209 return -EINVAL;
3210 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3211 !frame_aligned(runtime, from->iov->iov_len))
3212 return -EINVAL;
3213 frames = bytes_to_samples(runtime, from->iov->iov_len);
3214 bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
3215 if (bufs == NULL)
3216 return -ENOMEM;
3217 for (i = 0; i < from->nr_segs; ++i)
3218 bufs[i] = from->iov[i].iov_base;
3219 result = snd_pcm_lib_writev(substream, bufs, frames);
3220 if (result > 0)
3221 result = frames_to_bytes(runtime, result);
3222 kfree(bufs);
3223 return result;
3224 }
3225
3226 static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
3227 {
3228 struct snd_pcm_file *pcm_file;
3229 struct snd_pcm_substream *substream;
3230 struct snd_pcm_runtime *runtime;
3231 unsigned int mask;
3232 snd_pcm_uframes_t avail;
3233
3234 pcm_file = file->private_data;
3235
3236 substream = pcm_file->substream;
3237 if (PCM_RUNTIME_CHECK(substream))
3238 return POLLOUT | POLLWRNORM | POLLERR;
3239 runtime = substream->runtime;
3240
3241 poll_wait(file, &runtime->sleep, wait);
3242
3243 snd_pcm_stream_lock_irq(substream);
3244 avail = snd_pcm_playback_avail(runtime);
3245 switch (runtime->status->state) {
3246 case SNDRV_PCM_STATE_RUNNING:
3247 case SNDRV_PCM_STATE_PREPARED:
3248 case SNDRV_PCM_STATE_PAUSED:
3249 if (avail >= runtime->control->avail_min) {
3250 mask = POLLOUT | POLLWRNORM;
3251 break;
3252 }
3253 /* Fall through */
3254 case SNDRV_PCM_STATE_DRAINING:
3255 mask = 0;
3256 break;
3257 default:
3258 mask = POLLOUT | POLLWRNORM | POLLERR;
3259 break;
3260 }
3261 snd_pcm_stream_unlock_irq(substream);
3262 return mask;
3263 }
3264
3265 static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
3266 {
3267 struct snd_pcm_file *pcm_file;
3268 struct snd_pcm_substream *substream;
3269 struct snd_pcm_runtime *runtime;
3270 unsigned int mask;
3271 snd_pcm_uframes_t avail;
3272
3273 pcm_file = file->private_data;
3274
3275 substream = pcm_file->substream;
3276 if (PCM_RUNTIME_CHECK(substream))
3277 return POLLIN | POLLRDNORM | POLLERR;
3278 runtime = substream->runtime;
3279
3280 poll_wait(file, &runtime->sleep, wait);
3281
3282 snd_pcm_stream_lock_irq(substream);
3283 avail = snd_pcm_capture_avail(runtime);
3284 switch (runtime->status->state) {
3285 case SNDRV_PCM_STATE_RUNNING:
3286 case SNDRV_PCM_STATE_PREPARED:
3287 case SNDRV_PCM_STATE_PAUSED:
3288 if (avail >= runtime->control->avail_min) {
3289 mask = POLLIN | POLLRDNORM;
3290 break;
3291 }
3292 mask = 0;
3293 break;
3294 case SNDRV_PCM_STATE_DRAINING:
3295 if (avail > 0) {
3296 mask = POLLIN | POLLRDNORM;
3297 break;
3298 }
3299 /* Fall through */
3300 default:
3301 mask = POLLIN | POLLRDNORM | POLLERR;
3302 break;
3303 }
3304 snd_pcm_stream_unlock_irq(substream);
3305 return mask;
3306 }
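/*
 * Example: a minimal user-space sketch of waiting for capture data with
 * poll(), assuming "fd" is a running, interleaved read/write capture stream
 * and 4096 bytes is a whole number of frames.  POLLIN is reported once at
 * least avail_min frames are available, as computed above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		read(fd, buf, sizeof(buf));
 *
 * Playback follows the same pattern with POLLOUT instead of POLLIN.
 */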
3307
3308 /*
3309 * mmap support
3310 */
3311
3312 /*
3313 * Only on cache-coherent architectures can we mmap the status and control
3314 * records for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3315 */
3316 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3317 /*
3318 * mmap status record
3319 */
3320 static int snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3321 {
3322 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3323 struct snd_pcm_runtime *runtime;
3324
3325 if (substream == NULL)
3326 return VM_FAULT_SIGBUS;
3327 runtime = substream->runtime;
3328 vmf->page = virt_to_page(runtime->status);
3329 get_page(vmf->page);
3330 return 0;
3331 }
3332
3333 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3334 {
3335 .fault = snd_pcm_mmap_status_fault,
3336 };
3337
3338 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3339 struct vm_area_struct *area)
3340 {
3341 long size;
3342 if (!(area->vm_flags & VM_READ))
3343 return -EINVAL;
3344 size = area->vm_end - area->vm_start;
3345 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3346 return -EINVAL;
3347 area->vm_ops = &snd_pcm_vm_ops_status;
3348 area->vm_private_data = substream;
3349 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3350 return 0;
3351 }
3352
3353 /*
3354 * mmap control record
3355 */
3356 static int snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3357 {
3358 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3359 struct snd_pcm_runtime *runtime;
3360
3361 if (substream == NULL)
3362 return VM_FAULT_SIGBUS;
3363 runtime = substream->runtime;
3364 vmf->page = virt_to_page(runtime->control);
3365 get_page(vmf->page);
3366 return 0;
3367 }
3368
3369 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3370 {
3371 .fault = snd_pcm_mmap_control_fault,
3372 };
3373
3374 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3375 struct vm_area_struct *area)
3376 {
3377 long size;
3378 if (!(area->vm_flags & VM_READ))
3379 return -EINVAL;
3380 size = area->vm_end - area->vm_start;
3381 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3382 return -EINVAL;
3383 area->vm_ops = &snd_pcm_vm_ops_control;
3384 area->vm_private_data = substream;
3385 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3386 return 0;
3387 }
3388
3389 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3390 {
3391 if (pcm_file->no_compat_mmap)
3392 return false;
3393 /* See pcm_control_mmap_allowed() below.
3394 * Since older alsa-lib requires both status and control mmaps to be
3395 * coupled, we have to disable the status mmap for old alsa-lib, too.
3396 */
3397 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3398 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3399 return false;
3400 return true;
3401 }
3402
3403 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3404 {
3405 if (pcm_file->no_compat_mmap)
3406 return false;
3407 /* Disallow the control mmap when SYNC_APPLPTR flag is set;
3408 * this forces user-space to fall back to snd_pcm_sync_ptr(),
3409 * thus effectively ensuring the manual update of appl_ptr.
3410 */
3411 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3412 return false;
3413 return true;
3414 }
3415
3416 #else /* ! coherent mmap */
3417 /*
3418 * don't support mmap for status and control records.
3419 */
3420 #define pcm_status_mmap_allowed(pcm_file) false
3421 #define pcm_control_mmap_allowed(pcm_file) false
3422
3423 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3424 struct vm_area_struct *area)
3425 {
3426 return -ENXIO;
3427 }
3428 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3429 struct vm_area_struct *area)
3430 {
3431 return -ENXIO;
3432 }
3433 #endif /* coherent mmap */
3434
3435 static inline struct page *
3436 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3437 {
3438 void *vaddr = substream->runtime->dma_area + ofs;
3439 return virt_to_page(vaddr);
3440 }
3441
3442 /*
3443 * fault callback for mmapping a RAM page
3444 */
3445 static int snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3446 {
3447 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3448 struct snd_pcm_runtime *runtime;
3449 unsigned long offset;
3450 struct page * page;
3451 size_t dma_bytes;
3452
3453 if (substream == NULL)
3454 return VM_FAULT_SIGBUS;
3455 runtime = substream->runtime;
3456 offset = vmf->pgoff << PAGE_SHIFT;
3457 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3458 if (offset > dma_bytes - PAGE_SIZE)
3459 return VM_FAULT_SIGBUS;
3460 if (substream->ops->page)
3461 page = substream->ops->page(substream, offset);
3462 else
3463 page = snd_pcm_default_page_ops(substream, offset);
3464 if (!page)
3465 return VM_FAULT_SIGBUS;
3466 get_page(page);
3467 vmf->page = page;
3468 return 0;
3469 }
3470
3471 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3472 .open = snd_pcm_mmap_data_open,
3473 .close = snd_pcm_mmap_data_close,
3474 };
3475
3476 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3477 .open = snd_pcm_mmap_data_open,
3478 .close = snd_pcm_mmap_data_close,
3479 .fault = snd_pcm_mmap_data_fault,
3480 };
3481
3482 /*
3483 * mmap the DMA buffer on RAM
3484 */
3485
3486 /**
3487 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3488 * @substream: PCM substream
3489 * @area: VMA
3490 *
3491 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3492 * this function is invoked implicitly.
3493 */
3494 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3495 struct vm_area_struct *area)
3496 {
3497 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3498 #ifdef CONFIG_GENERIC_ALLOCATOR
3499 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3500 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3501 return remap_pfn_range(area, area->vm_start,
3502 substream->dma_buffer.addr >> PAGE_SHIFT,
3503 area->vm_end - area->vm_start, area->vm_page_prot);
3504 }
3505 #endif /* CONFIG_GENERIC_ALLOCATOR */
3506 #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
3507 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3508 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3509 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3510 area,
3511 substream->runtime->dma_area,
3512 substream->runtime->dma_addr,
3513 area->vm_end - area->vm_start);
3514 #endif /* CONFIG_X86 */
3515 /* mmap with fault handler */
3516 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3517 return 0;
3518 }
3519 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3520
3521 /*
3522 * mmap the DMA buffer on I/O memory area
3523 */
3524 #if SNDRV_PCM_INFO_MMAP_IOMEM
3525 /**
3526 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3527 * @substream: PCM substream
3528 * @area: VMA
3529 *
3530 * When your hardware uses iomapped pages as the hardware buffer and you
3531 * want to mmap it, pass this function as the mmap pcm_ops. Note that this
3532 * is supposed to work only on certain architectures.
3533 */
3534 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3535 struct vm_area_struct *area)
3536 {
3537 struct snd_pcm_runtime *runtime = substream->runtime;
3538
3539 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3540 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3541 }
3542 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3543 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
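/*
 * Example: a hedged sketch of a driver wiring the helper above into its PCM
 * ops; names prefixed with "mydrv_" are hypothetical.  Such a driver would
 * also advertise SNDRV_PCM_INFO_MMAP_IOMEM in its hardware info flags:
 *
 *	static const struct snd_pcm_ops mydrv_pcm_ops = {
 *		.open      = mydrv_pcm_open,
 *		.close     = mydrv_pcm_close,
 *		.ioctl     = snd_pcm_lib_ioctl,
 *		.hw_params = mydrv_pcm_hw_params,
 *		.hw_free   = mydrv_pcm_hw_free,
 *		.prepare   = mydrv_pcm_prepare,
 *		.trigger   = mydrv_pcm_trigger,
 *		.pointer   = mydrv_pcm_pointer,
 *		.mmap      = snd_pcm_lib_mmap_iomem,
 *	};
 */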
3544
3545 /*
3546 * mmap DMA buffer
3547 */
3548 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3549 struct vm_area_struct *area)
3550 {
3551 struct snd_pcm_runtime *runtime;
3552 long size;
3553 unsigned long offset;
3554 size_t dma_bytes;
3555 int err;
3556
3557 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3558 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3559 return -EINVAL;
3560 } else {
3561 if (!(area->vm_flags & VM_READ))
3562 return -EINVAL;
3563 }
3564 runtime = substream->runtime;
3565 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3566 return -EBADFD;
3567 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3568 return -ENXIO;
3569 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3570 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3571 return -EINVAL;
3572 size = area->vm_end - area->vm_start;
3573 offset = area->vm_pgoff << PAGE_SHIFT;
3574 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3575 if ((size_t)size > dma_bytes)
3576 return -EINVAL;
3577 if (offset > dma_bytes - size)
3578 return -EINVAL;
3579
3580 area->vm_ops = &snd_pcm_vm_ops_data;
3581 area->vm_private_data = substream;
3582 if (substream->ops->mmap)
3583 err = substream->ops->mmap(substream, area);
3584 else
3585 err = snd_pcm_lib_default_mmap(substream, area);
3586 if (!err)
3587 atomic_inc(&substream->mmap_count);
3588 return err;
3589 }
3590 EXPORT_SYMBOL(snd_pcm_mmap_data);
3591
3592 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3593 {
3594 struct snd_pcm_file * pcm_file;
3595 struct snd_pcm_substream *substream;
3596 unsigned long offset;
3597
3598 pcm_file = file->private_data;
3599 substream = pcm_file->substream;
3600 if (PCM_RUNTIME_CHECK(substream))
3601 return -ENXIO;
3602
3603 offset = area->vm_pgoff << PAGE_SHIFT;
3604 switch (offset) {
3605 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3606 if (!pcm_status_mmap_allowed(pcm_file))
3607 return -ENXIO;
3608 return snd_pcm_mmap_status(substream, file, area);
3609 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3610 if (!pcm_control_mmap_allowed(pcm_file))
3611 return -ENXIO;
3612 return snd_pcm_mmap_control(substream, file, area);
3613 default:
3614 return snd_pcm_mmap_data(substream, file, area);
3615 }
3616 return 0;
3617 }
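/*
 * Rough user-space sketch of the offset-based dispatch above: mapping the
 * PCM device node at SNDRV_PCM_MMAP_OFFSET_STATUS/CONTROL (from the uapi
 * header <sound/asound.h>) yields the status and control pages, while
 * offset 0 (SNDRV_PCM_MMAP_OFFSET_DATA) maps the audio ring buffer once
 * hw_params has been negotiated.  Error handling is omitted; real
 * applications normally go through alsa-lib (snd_pcm_mmap_begin() /
 * snd_pcm_mmap_commit()).
 */
#include <sys/mman.h>
#include <unistd.h>
#include <sound/asound.h>

static void map_pcm_regions(int fd, size_t buffer_bytes)
{
	long page = sysconf(_SC_PAGESIZE);
	void *status, *control, *data;

	status  = mmap(NULL, page, PROT_READ, MAP_SHARED,
		       fd, SNDRV_PCM_MMAP_OFFSET_STATUS);	/* hw_ptr, state */
	control = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);	/* appl_ptr, avail_min */
	data    = mmap(NULL, buffer_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, SNDRV_PCM_MMAP_OFFSET_DATA);		/* sample ring buffer */
	(void)status; (void)control; (void)data;
}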
3618
3619 static int snd_pcm_fasync(int fd, struct file *file, int on)
3620 {
3621 struct snd_pcm_file *pcm_file;
3622 struct snd_pcm_substream *substream;
3623 struct snd_pcm_runtime *runtime;
3624
3625 pcm_file = file->private_data;
3626 substream = pcm_file->substream;
3627 if (PCM_RUNTIME_CHECK(substream))
3628 return -ENXIO;
3629 runtime = substream->runtime;
3630 return fasync_helper(fd, file, on, &runtime->fasync);
3631 }
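/*
 * Rough user-space sketch: snd_pcm_fasync() only plugs the file into the
 * generic fasync machinery, so SIGIO delivery (triggered by the PCM core
 * via kill_fasync(), e.g. from snd_pcm_period_elapsed()) is requested with
 * the ordinary fcntl() calls:
 */
#include <fcntl.h>
#include <unistd.h>

static void enable_pcm_sigio(int fd)
{
	fcntl(fd, F_SETOWN, getpid());				/* who receives SIGIO */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* turn on fasync */
}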
3632
3633 /*
3634 * ioctl32 compat
3635 */
3636 #ifdef CONFIG_COMPAT
3637 #include "pcm_compat.c"
3638 #else
3639 #define snd_pcm_ioctl_compat NULL
3640 #endif
3641
3642 /*
3643 * Helpers kept only for binary compatibility with the old API; to be removed
3644 */
3645
3646 #ifdef CONFIG_SND_SUPPORT_OLD_API
3647 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3648 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
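/*
 * Worked example of the remapping (values derived from the constants
 * above): the three mask parameters stay at bits 0..2, while the interval
 * parameters, which in the old layout started immediately after them, are
 * shifted up by five bits to where the current ABI places them
 * (SAMPLE_BITS at bit 8):
 *
 *   __OLD_TO_NEW_MASK(0x7)   == 0x7     ACCESS/FORMAT/SUBFORMAT unchanged
 *   __OLD_TO_NEW_MASK(0x8)   == 0x100   old bit 3 -> new bit 8
 *   __NEW_TO_OLD_MASK(0x100) == 0x8     and back again
 */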
3649
3650 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3651 struct snd_pcm_hw_params_old *oparams)
3652 {
3653 unsigned int i;
3654
3655 memset(params, 0, sizeof(*params));
3656 params->flags = oparams->flags;
3657 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3658 params->masks[i].bits[0] = oparams->masks[i];
3659 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3660 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3661 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3662 params->info = oparams->info;
3663 params->msbits = oparams->msbits;
3664 params->rate_num = oparams->rate_num;
3665 params->rate_den = oparams->rate_den;
3666 params->fifo_size = oparams->fifo_size;
3667 }
3668
3669 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3670 struct snd_pcm_hw_params *params)
3671 {
3672 unsigned int i;
3673
3674 memset(oparams, 0, sizeof(*oparams));
3675 oparams->flags = params->flags;
3676 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3677 oparams->masks[i] = params->masks[i].bits[0];
3678 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3679 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3680 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3681 oparams->info = params->info;
3682 oparams->msbits = params->msbits;
3683 oparams->rate_num = params->rate_num;
3684 oparams->rate_den = params->rate_den;
3685 oparams->fifo_size = params->fifo_size;
3686 }
3687
3688 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3689 struct snd_pcm_hw_params_old __user * _oparams)
3690 {
3691 struct snd_pcm_hw_params *params;
3692 struct snd_pcm_hw_params_old *oparams = NULL;
3693 int err;
3694
3695 params = kmalloc(sizeof(*params), GFP_KERNEL);
3696 if (!params)
3697 return -ENOMEM;
3698
3699 oparams = memdup_user(_oparams, sizeof(*oparams));
3700 if (IS_ERR(oparams)) {
3701 err = PTR_ERR(oparams);
3702 goto out;
3703 }
3704 snd_pcm_hw_convert_from_old_params(params, oparams);
3705 err = snd_pcm_hw_refine(substream, params);
3706 if (err < 0)
3707 goto out_old;
3708
3709 err = fixup_unreferenced_params(substream, params);
3710 if (err < 0)
3711 goto out_old;
3712
3713 snd_pcm_hw_convert_to_old_params(oparams, params);
3714 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3715 err = -EFAULT;
3716 out_old:
3717 kfree(oparams);
3718 out:
3719 kfree(params);
3720 return err;
3721 }
3722
3723 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3724 struct snd_pcm_hw_params_old __user * _oparams)
3725 {
3726 struct snd_pcm_hw_params *params;
3727 struct snd_pcm_hw_params_old *oparams = NULL;
3728 int err;
3729
3730 params = kmalloc(sizeof(*params), GFP_KERNEL);
3731 if (!params)
3732 return -ENOMEM;
3733
3734 oparams = memdup_user(_oparams, sizeof(*oparams));
3735 if (IS_ERR(oparams)) {
3736 err = PTR_ERR(oparams);
3737 goto out;
3738 }
3739
3740 snd_pcm_hw_convert_from_old_params(params, oparams);
3741 err = snd_pcm_hw_params(substream, params);
3742 if (err < 0)
3743 goto out_old;
3744
3745 snd_pcm_hw_convert_to_old_params(oparams, params);
3746 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3747 err = -EFAULT;
3748 out_old:
3749 kfree(oparams);
3750 out:
3751 kfree(params);
3752 return err;
3753 }
3754 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3755
3756 #ifndef CONFIG_MMU
3757 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3758 unsigned long addr,
3759 unsigned long len,
3760 unsigned long pgoff,
3761 unsigned long flags)
3762 {
3763 struct snd_pcm_file *pcm_file = file->private_data;
3764 struct snd_pcm_substream *substream = pcm_file->substream;
3765 struct snd_pcm_runtime *runtime = substream->runtime;
3766 unsigned long offset = pgoff << PAGE_SHIFT;
3767
3768 switch (offset) {
3769 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3770 return (unsigned long)runtime->status;
3771 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3772 return (unsigned long)runtime->control;
3773 default:
3774 return (unsigned long)runtime->dma_area + offset;
3775 }
3776 }
3777 #else
3778 # define snd_pcm_get_unmapped_area NULL
3779 #endif
3780
3781 /*
3782 * Register section
3783 */
3784
3785 const struct file_operations snd_pcm_f_ops[2] = {
3786 {
3787 .owner = THIS_MODULE,
3788 .write = snd_pcm_write,
3789 .write_iter = snd_pcm_writev,
3790 .open = snd_pcm_playback_open,
3791 .release = snd_pcm_release,
3792 .llseek = no_llseek,
3793 .poll = snd_pcm_playback_poll,
3794 .unlocked_ioctl = snd_pcm_playback_ioctl,
3795 .compat_ioctl = snd_pcm_ioctl_compat,
3796 .mmap = snd_pcm_mmap,
3797 .fasync = snd_pcm_fasync,
3798 .get_unmapped_area = snd_pcm_get_unmapped_area,
3799 },
3800 {
3801 .owner = THIS_MODULE,
3802 .read = snd_pcm_read,
3803 .read_iter = snd_pcm_readv,
3804 .open = snd_pcm_capture_open,
3805 .release = snd_pcm_release,
3806 .llseek = no_llseek,
3807 .poll = snd_pcm_capture_poll,
3808 .unlocked_ioctl = snd_pcm_capture_ioctl,
3809 .compat_ioctl = snd_pcm_ioctl_compat,
3810 .mmap = snd_pcm_mmap,
3811 .fasync = snd_pcm_fasync,
3812 .get_unmapped_area = snd_pcm_get_unmapped_area,
3813 }
3814 };