sound/core/pcm_native.c
1 /*
2 * Digital Audio (PCM) abstract layer
3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/file.h>
25 #include <linux/slab.h>
26 #include <linux/sched/signal.h>
27 #include <linux/time.h>
28 #include <linux/pm_qos.h>
29 #include <linux/io.h>
30 #include <linux/dma-mapping.h>
31 #include <sound/core.h>
32 #include <sound/control.h>
33 #include <sound/info.h>
34 #include <sound/pcm.h>
35 #include <sound/pcm_params.h>
36 #include <sound/timer.h>
37 #include <sound/minors.h>
38 #include <linux/uio.h>
39 #include <linux/delay.h>
40
41 #include "pcm_local.h"
42
43 #ifdef CONFIG_SND_DEBUG
44 #define CREATE_TRACE_POINTS
45 #include "pcm_param_trace.h"
46 #else
47 #define trace_hw_mask_param_enabled() 0
48 #define trace_hw_interval_param_enabled() 0
49 #define trace_hw_mask_param(substream, type, index, prev, curr)
50 #define trace_hw_interval_param(substream, type, index, prev, curr)
51 #endif
52
53 /*
54 * Compatibility
55 */
56
57 struct snd_pcm_hw_params_old {
58 unsigned int flags;
59 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
60 SNDRV_PCM_HW_PARAM_ACCESS + 1];
61 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
62 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
63 unsigned int rmask;
64 unsigned int cmask;
65 unsigned int info;
66 unsigned int msbits;
67 unsigned int rate_num;
68 unsigned int rate_den;
69 snd_pcm_uframes_t fifo_size;
70 unsigned char reserved[64];
71 };
72
73 #ifdef CONFIG_SND_SUPPORT_OLD_API
74 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
75 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
76
77 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
78 struct snd_pcm_hw_params_old __user * _oparams);
79 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
80 struct snd_pcm_hw_params_old __user * _oparams);
81 #endif
82 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
83
84 /*
85 *
86 */
87
88 static DECLARE_RWSEM(snd_pcm_link_rwsem);
89
90 void snd_pcm_group_init(struct snd_pcm_group *group)
91 {
92 spin_lock_init(&group->lock);
93 mutex_init(&group->mutex);
94 INIT_LIST_HEAD(&group->substreams);
95 refcount_set(&group->refs, 0);
96 }
97
98 /* define group lock helpers */
99 #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
100 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
101 { \
102 if (nonatomic) \
103 mutex_ ## mutex_action(&group->mutex); \
104 else \
105 spin_ ## action(&group->lock); \
106 }
107
108 DEFINE_PCM_GROUP_LOCK(lock, lock);
109 DEFINE_PCM_GROUP_LOCK(unlock, unlock);
110 DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
111 DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
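/*
 * For reference, a rough expansion of one of the helpers generated above:
 * DEFINE_PCM_GROUP_LOCK(lock, lock) produces approximately
 *
 *   static void snd_pcm_group_lock(struct snd_pcm_group *group, bool nonatomic)
 *   {
 *           if (nonatomic)
 *                   mutex_lock(&group->mutex);
 *           else
 *                   spin_lock(&group->lock);
 *   }
 *
 * so each helper picks the group mutex or the group spinlock depending on
 * whether the PCM was registered as nonatomic.
 */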
112
113 /**
114 * snd_pcm_stream_lock - Lock the PCM stream
115 * @substream: PCM substream
116 *
117 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
118 * flag of the given substream. It also takes the global link rw lock
119 * (or rw sem) to avoid races with linked streams.
120 */
121 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
122 {
123 snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
124 }
125 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
126
127 /**
128 * snd_pcm_stream_unlock - Unlock the PCM stream
129 * @substream: PCM substream
130 *
131 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
132 */
133 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
134 {
135 snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
136 }
137 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
138
139 /**
140 * snd_pcm_stream_lock_irq - Lock the PCM stream
141 * @substream: PCM substream
142 *
143 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
144 * IRQ (only when nonatomic is false). In the nonatomic case, this is identical
145 * to snd_pcm_stream_lock().
146 */
147 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
148 {
149 snd_pcm_group_lock_irq(&substream->self_group,
150 substream->pcm->nonatomic);
151 }
152 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
153
154 /**
155 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
156 * @substream: PCM substream
157 *
158 * This is a counter-part of snd_pcm_stream_lock_irq().
159 */
160 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
161 {
162 snd_pcm_group_unlock_irq(&substream->self_group,
163 substream->pcm->nonatomic);
164 }
165 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
166
167 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
168 {
169 unsigned long flags = 0;
170 if (substream->pcm->nonatomic)
171 mutex_lock(&substream->self_group.mutex);
172 else
173 spin_lock_irqsave(&substream->self_group.lock, flags);
174 return flags;
175 }
176 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
177
178 /**
179 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
180 * @substream: PCM substream
181 * @flags: irq flags
182 *
183 * This is a counter-part of snd_pcm_stream_lock_irqsave().
184 */
185 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
186 unsigned long flags)
187 {
188 if (substream->pcm->nonatomic)
189 mutex_unlock(&substream->self_group.mutex);
190 else
191 spin_unlock_irqrestore(&substream->self_group.lock, flags);
192 }
193 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
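/*
 * A minimal usage sketch of the irqsave/irqrestore pair above, as it would
 * typically appear in caller code (assuming a valid, opened substream):
 *
 *   unsigned long flags;
 *
 *   snd_pcm_stream_lock_irqsave(substream, flags);
 *   ... access runtime->status / runtime->control fields ...
 *   snd_pcm_stream_unlock_irqrestore(substream, flags);
 *
 * snd_pcm_stream_lock_irqsave() is the wrapper macro from <sound/pcm.h>
 * around _snd_pcm_stream_lock_irqsave() defined above; it is used this way
 * in snd_pcm_stop_xrun() further below.
 */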
194
195 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
196 {
197 struct snd_pcm *pcm = substream->pcm;
198 struct snd_pcm_str *pstr = substream->pstr;
199
200 memset(info, 0, sizeof(*info));
201 info->card = pcm->card->number;
202 info->device = pcm->device;
203 info->stream = substream->stream;
204 info->subdevice = substream->number;
205 strlcpy(info->id, pcm->id, sizeof(info->id));
206 strlcpy(info->name, pcm->name, sizeof(info->name));
207 info->dev_class = pcm->dev_class;
208 info->dev_subclass = pcm->dev_subclass;
209 info->subdevices_count = pstr->substream_count;
210 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
211 strlcpy(info->subname, substream->name, sizeof(info->subname));
212
213 return 0;
214 }
215
216 int snd_pcm_info_user(struct snd_pcm_substream *substream,
217 struct snd_pcm_info __user * _info)
218 {
219 struct snd_pcm_info *info;
220 int err;
221
222 info = kmalloc(sizeof(*info), GFP_KERNEL);
223 if (! info)
224 return -ENOMEM;
225 err = snd_pcm_info(substream, info);
226 if (err >= 0) {
227 if (copy_to_user(_info, info, sizeof(*info)))
228 err = -EFAULT;
229 }
230 kfree(info);
231 return err;
232 }
233
234 static bool hw_support_mmap(struct snd_pcm_substream *substream)
235 {
236 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
237 return false;
238 /* architecture supports dma_mmap_coherent()? */
239 #if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
240 if (!substream->ops->mmap &&
241 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
242 return false;
243 #endif
244 return true;
245 }
246
247 static int constrain_mask_params(struct snd_pcm_substream *substream,
248 struct snd_pcm_hw_params *params)
249 {
250 struct snd_pcm_hw_constraints *constrs =
251 &substream->runtime->hw_constraints;
252 struct snd_mask *m;
253 unsigned int k;
254 struct snd_mask old_mask;
255 int changed;
256
257 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
258 m = hw_param_mask(params, k);
259 if (snd_mask_empty(m))
260 return -EINVAL;
261
262 /* This parameter was not requested to be changed by the caller. */
263 if (!(params->rmask & (1 << k)))
264 continue;
265
266 if (trace_hw_mask_param_enabled())
267 old_mask = *m;
268
269 changed = snd_mask_refine(m, constrs_mask(constrs, k));
270 if (changed < 0)
271 return changed;
272 if (changed == 0)
273 continue;
274
275 /* Set the corresponding flag so that the caller gets notified. */
276 trace_hw_mask_param(substream, k, 0, &old_mask, m);
277 params->cmask |= 1 << k;
278 }
279
280 return 0;
281 }
282
283 static int constrain_interval_params(struct snd_pcm_substream *substream,
284 struct snd_pcm_hw_params *params)
285 {
286 struct snd_pcm_hw_constraints *constrs =
287 &substream->runtime->hw_constraints;
288 struct snd_interval *i;
289 unsigned int k;
290 struct snd_interval old_interval;
291 int changed;
292
293 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
294 i = hw_param_interval(params, k);
295 if (snd_interval_empty(i))
296 return -EINVAL;
297
298 /* This parameter was not requested to be changed by the caller. */
299 if (!(params->rmask & (1 << k)))
300 continue;
301
302 if (trace_hw_interval_param_enabled())
303 old_interval = *i;
304
305 changed = snd_interval_refine(i, constrs_interval(constrs, k));
306 if (changed < 0)
307 return changed;
308 if (changed == 0)
309 continue;
310
312 /* Set the corresponding flag so that the caller gets notified. */
312 trace_hw_interval_param(substream, k, 0, &old_interval, i);
313 params->cmask |= 1 << k;
314 }
315
316 return 0;
317 }
318
319 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
320 struct snd_pcm_hw_params *params)
321 {
322 struct snd_pcm_hw_constraints *constrs =
323 &substream->runtime->hw_constraints;
324 unsigned int k;
325 unsigned int *rstamps;
326 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
327 unsigned int stamp;
328 struct snd_pcm_hw_rule *r;
329 unsigned int d;
330 struct snd_mask old_mask;
331 struct snd_interval old_interval;
332 bool again;
333 int changed, err = 0;
334
335 /*
336 * Each application of a rule has its own sequence number.
337 *
338 * Each member of the 'rstamps' array holds the sequence number of the
339 * most recent application of the corresponding rule.
340 */
341 rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
342 if (!rstamps)
343 return -ENOMEM;
344
345 /*
346 * Each member of the 'vstamps' array holds the sequence number of the
347 * most recent rule application in which the corresponding parameter was
348 * changed.
349 *
350 * Initially, the elements corresponding to parameters requested by the
351 * caller are set to 1. Members for unrequested parameters are 0, so
352 * those parameters are never changed.
353 */
354 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
355 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
356
357 /* Due to the above design, actual sequence numbers start at 2. */
358 stamp = 2;
359 retry:
360 /* Apply all rules in order. */
361 again = false;
362 for (k = 0; k < constrs->rules_num; k++) {
363 r = &constrs->rules[k];
364
365 /*
366 * Check the condition bits of this rule. When the rule has
367 * condition bits, it is applied only when the same bits are
368 * set in params->flags. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
369 * is an example of such a condition bit.
370 */
371 if (r->cond && !(r->cond & params->flags))
372 continue;
373
374 /*
375 * The 'deps' array includes at most three dependencies
376 * on SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
377 * member of this array is a sentinel and must be a
378 * negative value.
379 *
380 * This rule needs to be processed this time only when its
381 * dependent parameters were changed by earlier applications
382 * of the other rules.
383 */
384 for (d = 0; r->deps[d] >= 0; d++) {
385 if (vstamps[r->deps[d]] > rstamps[k])
386 break;
387 }
388 if (r->deps[d] < 0)
389 continue;
390
391 if (trace_hw_mask_param_enabled()) {
392 if (hw_is_mask(r->var))
393 old_mask = *hw_param_mask(params, r->var);
394 }
395 if (trace_hw_interval_param_enabled()) {
396 if (hw_is_interval(r->var))
397 old_interval = *hw_param_interval(params, r->var);
398 }
399
400 changed = r->func(params, r);
401 if (changed < 0) {
402 err = changed;
403 goto out;
404 }
405
406 /*
407 * When the parameter is changed, notify the caller via the
408 * corresponding bit in cmask, then prepare for the next
409 * iteration.
410 */
411 if (changed && r->var >= 0) {
412 if (hw_is_mask(r->var)) {
413 trace_hw_mask_param(substream, r->var,
414 k + 1, &old_mask,
415 hw_param_mask(params, r->var));
416 }
417 if (hw_is_interval(r->var)) {
418 trace_hw_interval_param(substream, r->var,
419 k + 1, &old_interval,
420 hw_param_interval(params, r->var));
421 }
422
423 params->cmask |= (1 << r->var);
424 vstamps[r->var] = stamp;
425 again = true;
426 }
427
428 rstamps[k] = stamp++;
429 }
430
431 /* Iterate to evaluate all rules until no parameters are changed. */
432 if (again)
433 goto retry;
434
435 out:
436 kfree(rstamps);
437 return err;
438 }
439
440 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
441 struct snd_pcm_hw_params *params)
442 {
443 const struct snd_interval *i;
444 const struct snd_mask *m;
445 int err;
446
447 if (!params->msbits) {
448 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
449 if (snd_interval_single(i))
450 params->msbits = snd_interval_value(i);
451 }
452
453 if (!params->rate_den) {
454 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
455 if (snd_interval_single(i)) {
456 params->rate_num = snd_interval_value(i);
457 params->rate_den = 1;
458 }
459 }
460
461 if (!params->fifo_size) {
462 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
463 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
464 if (snd_mask_single(m) && snd_interval_single(i)) {
465 err = substream->ops->ioctl(substream,
466 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
467 if (err < 0)
468 return err;
469 }
470 }
471
472 if (!params->info) {
473 params->info = substream->runtime->hw.info;
474 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
475 SNDRV_PCM_INFO_DRAIN_TRIGGER);
476 if (!hw_support_mmap(substream))
477 params->info &= ~(SNDRV_PCM_INFO_MMAP |
478 SNDRV_PCM_INFO_MMAP_VALID);
479 }
480
481 return 0;
482 }
483
484 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
485 struct snd_pcm_hw_params *params)
486 {
487 int err;
488
489 params->info = 0;
490 params->fifo_size = 0;
491 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
492 params->msbits = 0;
493 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
494 params->rate_num = 0;
495 params->rate_den = 0;
496 }
497
498 err = constrain_mask_params(substream, params);
499 if (err < 0)
500 return err;
501
502 err = constrain_interval_params(substream, params);
503 if (err < 0)
504 return err;
505
506 err = constrain_params_by_rules(substream, params);
507 if (err < 0)
508 return err;
509
510 params->rmask = 0;
511
512 return 0;
513 }
514 EXPORT_SYMBOL(snd_pcm_hw_refine);
515
516 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
517 struct snd_pcm_hw_params __user * _params)
518 {
519 struct snd_pcm_hw_params *params;
520 int err;
521
522 params = memdup_user(_params, sizeof(*params));
523 if (IS_ERR(params))
524 return PTR_ERR(params);
525
526 err = snd_pcm_hw_refine(substream, params);
527 if (err < 0)
528 goto end;
529
530 err = fixup_unreferenced_params(substream, params);
531 if (err < 0)
532 goto end;
533
534 if (copy_to_user(_params, params, sizeof(*params)))
535 err = -EFAULT;
536 end:
537 kfree(params);
538 return err;
539 }
540
541 static int period_to_usecs(struct snd_pcm_runtime *runtime)
542 {
543 int usecs;
544
545 if (! runtime->rate)
546 return -1; /* invalid */
547
548 /* take 75% of period time as the deadline */
549 usecs = (750000 / runtime->rate) * runtime->period_size;
550 usecs += ((750000 % runtime->rate) * runtime->period_size) /
551 runtime->rate;
552
553 return usecs;
554 }
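/*
 * Worked example for period_to_usecs(): with rate = 48000 and
 * period_size = 1024 frames, the period is about 21333 us and 75% of it is
 *
 *   (750000 / 48000) * 1024             = 15 * 1024        = 15360
 *   ((750000 % 48000) * 1024) / 48000   = 30720000 / 48000 =   640
 *                                                             -----
 *                                                             16000 us
 *
 * which is the CPU DMA latency later requested via pm_qos in
 * snd_pcm_hw_params().
 */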
555
556 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
557 {
558 snd_pcm_stream_lock_irq(substream);
559 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
560 substream->runtime->status->state = state;
561 snd_pcm_stream_unlock_irq(substream);
562 }
563
564 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
565 int event)
566 {
567 #ifdef CONFIG_SND_PCM_TIMER
568 if (substream->timer)
569 snd_timer_notify(substream->timer, event,
570 &substream->runtime->trigger_tstamp);
571 #endif
572 }
573
574 /**
575 * snd_pcm_hw_params_choose - choose a configuration defined by @params
576 * @pcm: PCM substream instance
577 * @params: the hw_params instance
578 *
579 * Choose one configuration from configuration space defined by @params.
580 * The configuration chosen is that obtained fixing in this order:
581 * first access, first format, first subformat, min channels,
582 * min rate, min period time, max buffer size, min tick time
583 *
584 * Return: Zero if successful, or a negative error code on failure.
585 */
586 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
587 struct snd_pcm_hw_params *params)
588 {
589 static const int vars[] = {
590 SNDRV_PCM_HW_PARAM_ACCESS,
591 SNDRV_PCM_HW_PARAM_FORMAT,
592 SNDRV_PCM_HW_PARAM_SUBFORMAT,
593 SNDRV_PCM_HW_PARAM_CHANNELS,
594 SNDRV_PCM_HW_PARAM_RATE,
595 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
596 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
597 SNDRV_PCM_HW_PARAM_TICK_TIME,
598 -1
599 };
600 const int *v;
601 struct snd_mask old_mask;
602 struct snd_interval old_interval;
603 int changed;
604
605 for (v = vars; *v != -1; v++) {
606 /* Keep old parameter to trace. */
607 if (trace_hw_mask_param_enabled()) {
608 if (hw_is_mask(*v))
609 old_mask = *hw_param_mask(params, *v);
610 }
611 if (trace_hw_interval_param_enabled()) {
612 if (hw_is_interval(*v))
613 old_interval = *hw_param_interval(params, *v);
614 }
615 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
616 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
617 else
618 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
619 if (changed < 0)
620 return changed;
621 if (changed == 0)
622 continue;
623
624 /* Trace the changed parameter. */
625 if (hw_is_mask(*v)) {
626 trace_hw_mask_param(pcm, *v, 0, &old_mask,
627 hw_param_mask(params, *v));
628 }
629 if (hw_is_interval(*v)) {
630 trace_hw_interval_param(pcm, *v, 0, &old_interval,
631 hw_param_interval(params, *v));
632 }
633 }
634
635 return 0;
636 }
637
638 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
639 struct snd_pcm_hw_params *params)
640 {
641 struct snd_pcm_runtime *runtime;
642 int err, usecs;
643 unsigned int bits;
644 snd_pcm_uframes_t frames;
645
646 if (PCM_RUNTIME_CHECK(substream))
647 return -ENXIO;
648 runtime = substream->runtime;
649 snd_pcm_stream_lock_irq(substream);
650 switch (runtime->status->state) {
651 case SNDRV_PCM_STATE_OPEN:
652 case SNDRV_PCM_STATE_SETUP:
653 case SNDRV_PCM_STATE_PREPARED:
654 break;
655 default:
656 snd_pcm_stream_unlock_irq(substream);
657 return -EBADFD;
658 }
659 snd_pcm_stream_unlock_irq(substream);
660 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
661 if (!substream->oss.oss)
662 #endif
663 if (atomic_read(&substream->mmap_count))
664 return -EBADFD;
665
666 params->rmask = ~0U;
667 err = snd_pcm_hw_refine(substream, params);
668 if (err < 0)
669 goto _error;
670
671 err = snd_pcm_hw_params_choose(substream, params);
672 if (err < 0)
673 goto _error;
674
675 err = fixup_unreferenced_params(substream, params);
676 if (err < 0)
677 goto _error;
678
679 if (substream->ops->hw_params != NULL) {
680 err = substream->ops->hw_params(substream, params);
681 if (err < 0)
682 goto _error;
683 }
684
685 runtime->access = params_access(params);
686 runtime->format = params_format(params);
687 runtime->subformat = params_subformat(params);
688 runtime->channels = params_channels(params);
689 runtime->rate = params_rate(params);
690 runtime->period_size = params_period_size(params);
691 runtime->periods = params_periods(params);
692 runtime->buffer_size = params_buffer_size(params);
693 runtime->info = params->info;
694 runtime->rate_num = params->rate_num;
695 runtime->rate_den = params->rate_den;
696 runtime->no_period_wakeup =
697 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
698 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
699
700 bits = snd_pcm_format_physical_width(runtime->format);
701 runtime->sample_bits = bits;
702 bits *= runtime->channels;
703 runtime->frame_bits = bits;
704 frames = 1;
705 while (bits % 8 != 0) {
706 bits *= 2;
707 frames *= 2;
708 }
709 runtime->byte_align = bits / 8;
710 runtime->min_align = frames;
711
712 /* Default sw params */
713 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
714 runtime->period_step = 1;
715 runtime->control->avail_min = runtime->period_size;
716 runtime->start_threshold = 1;
717 runtime->stop_threshold = runtime->buffer_size;
718 runtime->silence_threshold = 0;
719 runtime->silence_size = 0;
720 runtime->boundary = runtime->buffer_size;
721 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
722 runtime->boundary *= 2;
723
724 snd_pcm_timer_resolution_change(substream);
725 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
726
727 if (pm_qos_request_active(&substream->latency_pm_qos_req))
728 pm_qos_remove_request(&substream->latency_pm_qos_req);
729 if ((usecs = period_to_usecs(runtime)) >= 0)
730 pm_qos_add_request(&substream->latency_pm_qos_req,
731 PM_QOS_CPU_DMA_LATENCY, usecs);
732 return 0;
733 _error:
734 /* the hardware might be unusable from this point,
735 so we force the application to retry setting
736 the correct hardware parameters */
737 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
738 if (substream->ops->hw_free != NULL)
739 substream->ops->hw_free(substream);
740 return err;
741 }
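/*
 * Sketch of the byte_align/min_align computation in snd_pcm_hw_params()
 * above, for two representative cases:
 *
 *   S16_LE, 2 channels:  sample_bits = 16, frame_bits = 32,
 *                        byte_align = 4 bytes, min_align = 1 frame
 *
 *   a 4-bit-per-sample format (e.g. IMA ADPCM), 1 channel:
 *                        frame_bits = 4, the while loop doubles it to 8,
 *                        so byte_align = 1 byte and min_align = 2 frames,
 *                        i.e. two frames share one byte and transfers must
 *                        be aligned to pairs of frames.
 */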
742
743 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
744 struct snd_pcm_hw_params __user * _params)
745 {
746 struct snd_pcm_hw_params *params;
747 int err;
748
749 params = memdup_user(_params, sizeof(*params));
750 if (IS_ERR(params))
751 return PTR_ERR(params);
752
753 err = snd_pcm_hw_params(substream, params);
754 if (err < 0)
755 goto end;
756
757 if (copy_to_user(_params, params, sizeof(*params)))
758 err = -EFAULT;
759 end:
760 kfree(params);
761 return err;
762 }
763
764 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
765 {
766 struct snd_pcm_runtime *runtime;
767 int result = 0;
768
769 if (PCM_RUNTIME_CHECK(substream))
770 return -ENXIO;
771 runtime = substream->runtime;
772 snd_pcm_stream_lock_irq(substream);
773 switch (runtime->status->state) {
774 case SNDRV_PCM_STATE_SETUP:
775 case SNDRV_PCM_STATE_PREPARED:
776 break;
777 default:
778 snd_pcm_stream_unlock_irq(substream);
779 return -EBADFD;
780 }
781 snd_pcm_stream_unlock_irq(substream);
782 if (atomic_read(&substream->mmap_count))
783 return -EBADFD;
784 if (substream->ops->hw_free)
785 result = substream->ops->hw_free(substream);
786 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
787 pm_qos_remove_request(&substream->latency_pm_qos_req);
788 return result;
789 }
790
791 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
792 struct snd_pcm_sw_params *params)
793 {
794 struct snd_pcm_runtime *runtime;
795 int err;
796
797 if (PCM_RUNTIME_CHECK(substream))
798 return -ENXIO;
799 runtime = substream->runtime;
800 snd_pcm_stream_lock_irq(substream);
801 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
802 snd_pcm_stream_unlock_irq(substream);
803 return -EBADFD;
804 }
805 snd_pcm_stream_unlock_irq(substream);
806
807 if (params->tstamp_mode < 0 ||
808 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
809 return -EINVAL;
810 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
811 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
812 return -EINVAL;
813 if (params->avail_min == 0)
814 return -EINVAL;
815 if (params->silence_size >= runtime->boundary) {
816 if (params->silence_threshold != 0)
817 return -EINVAL;
818 } else {
819 if (params->silence_size > params->silence_threshold)
820 return -EINVAL;
821 if (params->silence_threshold > runtime->buffer_size)
822 return -EINVAL;
823 }
824 err = 0;
825 snd_pcm_stream_lock_irq(substream);
826 runtime->tstamp_mode = params->tstamp_mode;
827 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
828 runtime->tstamp_type = params->tstamp_type;
829 runtime->period_step = params->period_step;
830 runtime->control->avail_min = params->avail_min;
831 runtime->start_threshold = params->start_threshold;
832 runtime->stop_threshold = params->stop_threshold;
833 runtime->silence_threshold = params->silence_threshold;
834 runtime->silence_size = params->silence_size;
835 params->boundary = runtime->boundary;
836 if (snd_pcm_running(substream)) {
837 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
838 runtime->silence_size > 0)
839 snd_pcm_playback_silence(substream, ULONG_MAX);
840 err = snd_pcm_update_state(substream, runtime);
841 }
842 snd_pcm_stream_unlock_irq(substream);
843 return err;
844 }
845
846 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
847 struct snd_pcm_sw_params __user * _params)
848 {
849 struct snd_pcm_sw_params params;
850 int err;
851 if (copy_from_user(&params, _params, sizeof(params)))
852 return -EFAULT;
853 err = snd_pcm_sw_params(substream, &params);
854 if (copy_to_user(_params, &params, sizeof(params)))
855 return -EFAULT;
856 return err;
857 }
858
859 static inline snd_pcm_uframes_t
860 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
861 {
862 snd_pcm_uframes_t delay;
863
864 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
865 delay = snd_pcm_playback_hw_avail(substream->runtime);
866 else
867 delay = snd_pcm_capture_avail(substream->runtime);
868 return delay + substream->runtime->delay;
869 }
870
871 int snd_pcm_status(struct snd_pcm_substream *substream,
872 struct snd_pcm_status *status)
873 {
874 struct snd_pcm_runtime *runtime = substream->runtime;
875
876 snd_pcm_stream_lock_irq(substream);
877
878 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
879 &runtime->audio_tstamp_config);
880
881 /* backwards compatible behavior */
882 if (runtime->audio_tstamp_config.type_requested ==
883 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
884 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
885 runtime->audio_tstamp_config.type_requested =
886 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
887 else
888 runtime->audio_tstamp_config.type_requested =
889 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
890 runtime->audio_tstamp_report.valid = 0;
891 } else
892 runtime->audio_tstamp_report.valid = 1;
893
894 status->state = runtime->status->state;
895 status->suspended_state = runtime->status->suspended_state;
896 if (status->state == SNDRV_PCM_STATE_OPEN)
897 goto _end;
898 status->trigger_tstamp = runtime->trigger_tstamp;
899 if (snd_pcm_running(substream)) {
900 snd_pcm_update_hw_ptr(substream);
901 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
902 status->tstamp = runtime->status->tstamp;
903 status->driver_tstamp = runtime->driver_tstamp;
904 status->audio_tstamp =
905 runtime->status->audio_tstamp;
906 if (runtime->audio_tstamp_report.valid == 1)
907 /* backwards compatibility, no report provided in COMPAT mode */
908 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
909 &status->audio_tstamp_accuracy,
910 &runtime->audio_tstamp_report);
911
912 goto _tstamp_end;
913 }
914 } else {
915 /* get tstamp only in fallback mode and only if enabled */
916 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
917 snd_pcm_gettime(runtime, &status->tstamp);
918 }
919 _tstamp_end:
920 status->appl_ptr = runtime->control->appl_ptr;
921 status->hw_ptr = runtime->status->hw_ptr;
922 status->avail = snd_pcm_avail(substream);
923 status->delay = snd_pcm_running(substream) ?
924 snd_pcm_calc_delay(substream) : 0;
925 status->avail_max = runtime->avail_max;
926 status->overrange = runtime->overrange;
927 runtime->avail_max = 0;
928 runtime->overrange = 0;
929 _end:
930 snd_pcm_stream_unlock_irq(substream);
931 return 0;
932 }
933
934 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
935 struct snd_pcm_status __user * _status,
936 bool ext)
937 {
938 struct snd_pcm_status status;
939 int res;
940
941 memset(&status, 0, sizeof(status));
942 /*
943 * with extension, parameters are read/write,
944 * get audio_tstamp_data from user,
945 * ignore rest of status structure
946 */
947 if (ext && get_user(status.audio_tstamp_data,
948 (u32 __user *)(&_status->audio_tstamp_data)))
949 return -EFAULT;
950 res = snd_pcm_status(substream, &status);
951 if (res < 0)
952 return res;
953 if (copy_to_user(_status, &status, sizeof(status)))
954 return -EFAULT;
955 return 0;
956 }
957
958 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
959 struct snd_pcm_channel_info * info)
960 {
961 struct snd_pcm_runtime *runtime;
962 unsigned int channel;
963
964 channel = info->channel;
965 runtime = substream->runtime;
966 snd_pcm_stream_lock_irq(substream);
967 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
968 snd_pcm_stream_unlock_irq(substream);
969 return -EBADFD;
970 }
971 snd_pcm_stream_unlock_irq(substream);
972 if (channel >= runtime->channels)
973 return -EINVAL;
974 memset(info, 0, sizeof(*info));
975 info->channel = channel;
976 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
977 }
978
979 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
980 struct snd_pcm_channel_info __user * _info)
981 {
982 struct snd_pcm_channel_info info;
983 int res;
984
985 if (copy_from_user(&info, _info, sizeof(info)))
986 return -EFAULT;
987 res = snd_pcm_channel_info(substream, &info);
988 if (res < 0)
989 return res;
990 if (copy_to_user(_info, &info, sizeof(info)))
991 return -EFAULT;
992 return 0;
993 }
994
995 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
996 {
997 struct snd_pcm_runtime *runtime = substream->runtime;
998 if (runtime->trigger_master == NULL)
999 return;
1000 if (runtime->trigger_master == substream) {
1001 if (!runtime->trigger_tstamp_latched)
1002 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1003 } else {
1004 snd_pcm_trigger_tstamp(runtime->trigger_master);
1005 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1006 }
1007 runtime->trigger_master = NULL;
1008 }
1009
1010 struct action_ops {
1011 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1012 int (*do_action)(struct snd_pcm_substream *substream, int state);
1013 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1014 void (*post_action)(struct snd_pcm_substream *substream, int state);
1015 };
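/*
 * Rough control flow of an action over a (possibly linked) group, as
 * implemented by snd_pcm_action_group() below:
 *
 *   for each substream s in the group:  pre_action(s, state)
 *   for each substream s in the group:  do_action(s, state)
 *       on failure: undo_action() on the substreams already processed
 *   for each substream s in the group:  post_action(s, state)
 *
 * pre_action() validates the state transition, do_action() talks to the
 * driver (usually via the trigger callback), and post_action() updates the
 * runtime state (and, for stop-like actions, wakes up waiters).
 */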
1016
1017 /*
1018 * this function is the core for handling linked streams
1019 * Note: the stream state might be changed also on failure
1020 * Note2: call with calling stream lock + link lock
1021 */
1022 static int snd_pcm_action_group(const struct action_ops *ops,
1023 struct snd_pcm_substream *substream,
1024 int state, int do_lock)
1025 {
1026 struct snd_pcm_substream *s = NULL;
1027 struct snd_pcm_substream *s1;
1028 int res = 0, depth = 1;
1029
1030 snd_pcm_group_for_each_entry(s, substream) {
1031 if (do_lock && s != substream) {
1032 if (s->pcm->nonatomic)
1033 mutex_lock_nested(&s->self_group.mutex, depth);
1034 else
1035 spin_lock_nested(&s->self_group.lock, depth);
1036 depth++;
1037 }
1038 res = ops->pre_action(s, state);
1039 if (res < 0)
1040 goto _unlock;
1041 }
1042 snd_pcm_group_for_each_entry(s, substream) {
1043 res = ops->do_action(s, state);
1044 if (res < 0) {
1045 if (ops->undo_action) {
1046 snd_pcm_group_for_each_entry(s1, substream) {
1047 if (s1 == s) /* failed stream */
1048 break;
1049 ops->undo_action(s1, state);
1050 }
1051 }
1052 s = NULL; /* unlock all */
1053 goto _unlock;
1054 }
1055 }
1056 snd_pcm_group_for_each_entry(s, substream) {
1057 ops->post_action(s, state);
1058 }
1059 _unlock:
1060 if (do_lock) {
1061 /* unlock streams */
1062 snd_pcm_group_for_each_entry(s1, substream) {
1063 if (s1 != substream) {
1064 if (s1->pcm->nonatomic)
1065 mutex_unlock(&s1->self_group.mutex);
1066 else
1067 spin_unlock(&s1->self_group.lock);
1068 }
1069 if (s1 == s) /* end */
1070 break;
1071 }
1072 }
1073 return res;
1074 }
1075
1076 /*
1077 * Note: call with stream lock
1078 */
1079 static int snd_pcm_action_single(const struct action_ops *ops,
1080 struct snd_pcm_substream *substream,
1081 int state)
1082 {
1083 int res;
1084
1085 res = ops->pre_action(substream, state);
1086 if (res < 0)
1087 return res;
1088 res = ops->do_action(substream, state);
1089 if (res == 0)
1090 ops->post_action(substream, state);
1091 else if (ops->undo_action)
1092 ops->undo_action(substream, state);
1093 return res;
1094 }
1095
1096 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1097 struct snd_pcm_group *new_group)
1098 {
1099 substream->group = new_group;
1100 list_move(&substream->link_list, &new_group->substreams);
1101 }
1102
1103 /*
1104 * Unref and unlock the group, but keep the stream lock;
1105 * when the group becomes empty and is no longer referenced, it is freed
1106 */
1107 static void snd_pcm_group_unref(struct snd_pcm_group *group,
1108 struct snd_pcm_substream *substream)
1109 {
1110 bool do_free;
1111
1112 if (!group)
1113 return;
1114 do_free = refcount_dec_and_test(&group->refs) &&
1115 list_empty(&group->substreams);
1116 snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1117 if (do_free)
1118 kfree(group);
1119 }
1120
1121 /*
1122 * Lock the group inside a stream lock and reference it;
1123 * return the locked group object, or NULL if not linked
1124 */
1125 static struct snd_pcm_group *
1126 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1127 {
1128 bool nonatomic = substream->pcm->nonatomic;
1129 struct snd_pcm_group *group;
1130 bool trylock;
1131
1132 for (;;) {
1133 if (!snd_pcm_stream_linked(substream))
1134 return NULL;
1135 group = substream->group;
1136 /* block freeing the group object */
1137 refcount_inc(&group->refs);
1138
1139 trylock = nonatomic ? mutex_trylock(&group->mutex) :
1140 spin_trylock(&group->lock);
1141 if (trylock)
1142 break; /* OK */
1143
1144 /* re-lock for avoiding ABBA deadlock */
1145 snd_pcm_stream_unlock(substream);
1146 snd_pcm_group_lock(group, nonatomic);
1147 snd_pcm_stream_lock(substream);
1148
1149 /* check the group again; the above opens a small race window */
1150 if (substream->group == group)
1151 break; /* OK */
1152 /* group changed, try again */
1153 snd_pcm_group_unref(group, substream);
1154 }
1155 return group;
1156 }
1157
1158 /*
1159 * Note: call with stream lock
1160 */
1161 static int snd_pcm_action(const struct action_ops *ops,
1162 struct snd_pcm_substream *substream,
1163 int state)
1164 {
1165 struct snd_pcm_group *group;
1166 int res;
1167
1168 group = snd_pcm_stream_group_ref(substream);
1169 if (group)
1170 res = snd_pcm_action_group(ops, substream, state, 1);
1171 else
1172 res = snd_pcm_action_single(ops, substream, state);
1173 snd_pcm_group_unref(group, substream);
1174 return res;
1175 }
1176
1177 /*
1178 * Note: call this without holding any stream locks
1179 */
1180 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1181 struct snd_pcm_substream *substream,
1182 int state)
1183 {
1184 int res;
1185
1186 snd_pcm_stream_lock_irq(substream);
1187 res = snd_pcm_action(ops, substream, state);
1188 snd_pcm_stream_unlock_irq(substream);
1189 return res;
1190 }
1191
1192 /*
1193 */
1194 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1195 struct snd_pcm_substream *substream,
1196 int state)
1197 {
1198 int res;
1199
1200 /* Guarantee the group members won't change during non-atomic action */
1201 down_read(&snd_pcm_link_rwsem);
1202 if (snd_pcm_stream_linked(substream))
1203 res = snd_pcm_action_group(ops, substream, state, 0);
1204 else
1205 res = snd_pcm_action_single(ops, substream, state);
1206 up_read(&snd_pcm_link_rwsem);
1207 return res;
1208 }
1209
1210 /*
1211 * start callbacks
1212 */
1213 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1214 {
1215 struct snd_pcm_runtime *runtime = substream->runtime;
1216 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1217 return -EBADFD;
1218 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1219 !snd_pcm_playback_data(substream))
1220 return -EPIPE;
1221 runtime->trigger_tstamp_latched = false;
1222 runtime->trigger_master = substream;
1223 return 0;
1224 }
1225
1226 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1227 {
1228 if (substream->runtime->trigger_master != substream)
1229 return 0;
1230 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1231 }
1232
1233 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1234 {
1235 if (substream->runtime->trigger_master == substream)
1236 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1237 }
1238
1239 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1240 {
1241 struct snd_pcm_runtime *runtime = substream->runtime;
1242 snd_pcm_trigger_tstamp(substream);
1243 runtime->hw_ptr_jiffies = jiffies;
1244 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1245 runtime->rate;
1246 runtime->status->state = state;
1247 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1248 runtime->silence_size > 0)
1249 snd_pcm_playback_silence(substream, ULONG_MAX);
1250 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1251 }
1252
1253 static const struct action_ops snd_pcm_action_start = {
1254 .pre_action = snd_pcm_pre_start,
1255 .do_action = snd_pcm_do_start,
1256 .undo_action = snd_pcm_undo_start,
1257 .post_action = snd_pcm_post_start
1258 };
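/*
 * On the driver side, snd_pcm_do_start() and snd_pcm_do_stop() end up in
 * the driver's trigger callback. A minimal sketch of such a callback
 * (names and DMA details are illustrative only):
 *
 *   static int my_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 *   {
 *           switch (cmd) {
 *           case SNDRV_PCM_TRIGGER_START:
 *                   ... start the DMA engine ...
 *                   return 0;
 *           case SNDRV_PCM_TRIGGER_STOP:
 *                   ... stop the DMA engine ...
 *                   return 0;
 *           default:
 *                   return -EINVAL;
 *           }
 *   }
 *
 * Trigger callbacks may be invoked in atomic context, so they must not
 * sleep unless the PCM was registered as nonatomic.
 */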
1259
1260 /**
1261 * snd_pcm_start - start all linked streams
1262 * @substream: the PCM substream instance
1263 *
1264 * Return: Zero if successful, or a negative error code.
1265 * The stream lock must be acquired before calling this function.
1266 */
1267 int snd_pcm_start(struct snd_pcm_substream *substream)
1268 {
1269 return snd_pcm_action(&snd_pcm_action_start, substream,
1270 SNDRV_PCM_STATE_RUNNING);
1271 }
1272
1273 /* take the stream lock and start the streams */
1274 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1275 {
1276 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1277 SNDRV_PCM_STATE_RUNNING);
1278 }
1279
1280 /*
1281 * stop callbacks
1282 */
1283 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1284 {
1285 struct snd_pcm_runtime *runtime = substream->runtime;
1286 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1287 return -EBADFD;
1288 runtime->trigger_master = substream;
1289 return 0;
1290 }
1291
1292 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1293 {
1294 if (substream->runtime->trigger_master == substream &&
1295 snd_pcm_running(substream))
1296 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1297 return 0; /* unconditionally stop all substreams */
1298 }
1299
1300 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1301 {
1302 struct snd_pcm_runtime *runtime = substream->runtime;
1303 if (runtime->status->state != state) {
1304 snd_pcm_trigger_tstamp(substream);
1305 runtime->status->state = state;
1306 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1307 }
1308 wake_up(&runtime->sleep);
1309 wake_up(&runtime->tsleep);
1310 }
1311
1312 static const struct action_ops snd_pcm_action_stop = {
1313 .pre_action = snd_pcm_pre_stop,
1314 .do_action = snd_pcm_do_stop,
1315 .post_action = snd_pcm_post_stop
1316 };
1317
1318 /**
1319 * snd_pcm_stop - try to stop all running streams in the substream group
1320 * @substream: the PCM substream instance
1321 * @state: PCM state after stopping the stream
1322 *
1323 * The state of each stream is then changed to the given state unconditionally.
1324 *
1325 * Return: Zero if successful, or a negative error code.
1326 */
1327 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1328 {
1329 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1330 }
1331 EXPORT_SYMBOL(snd_pcm_stop);
1332
1333 /**
1334 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1335 * @substream: the PCM substream
1336 *
1337 * After stopping, the state is changed to SETUP.
1338 * Unlike snd_pcm_stop(), this affects only the given stream.
1339 *
1340 * Return: Zero if successful, or a negative error code.
1341 */
1342 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1343 {
1344 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1345 SNDRV_PCM_STATE_SETUP);
1346 }
1347
1348 /**
1349 * snd_pcm_stop_xrun - stop the running streams as XRUN
1350 * @substream: the PCM substream instance
1351 *
1352 * This stops the given running substream (and all linked substreams) as XRUN.
1353 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1354 *
1355 * Return: Zero if successful, or a negative error code.
1356 */
1357 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1358 {
1359 unsigned long flags;
1360
1361 snd_pcm_stream_lock_irqsave(substream, flags);
1362 if (substream->runtime && snd_pcm_running(substream))
1363 __snd_pcm_xrun(substream);
1364 snd_pcm_stream_unlock_irqrestore(substream, flags);
1365 return 0;
1366 }
1367 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1368
1369 /*
1370 * pause callbacks
1371 */
1372 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1373 {
1374 struct snd_pcm_runtime *runtime = substream->runtime;
1375 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1376 return -ENOSYS;
1377 if (push) {
1378 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1379 return -EBADFD;
1380 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1381 return -EBADFD;
1382 runtime->trigger_master = substream;
1383 return 0;
1384 }
1385
1386 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1387 {
1388 if (substream->runtime->trigger_master != substream)
1389 return 0;
1390 /* some drivers might use hw_ptr to recover from the pause -
1391 update the hw_ptr now */
1392 if (push)
1393 snd_pcm_update_hw_ptr(substream);
1394 /* The jiffies check in snd_pcm_update_hw_ptr*() is based on
1395 * the delta from the current jiffies; setting hw_ptr_jiffies far in
1396 * the past gives a large enough delta to effectively skip the check once.
1397 */
1398 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1399 return substream->ops->trigger(substream,
1400 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1401 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1402 }
1403
1404 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1405 {
1406 if (substream->runtime->trigger_master == substream)
1407 substream->ops->trigger(substream,
1408 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1409 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1410 }
1411
1412 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1413 {
1414 struct snd_pcm_runtime *runtime = substream->runtime;
1415 snd_pcm_trigger_tstamp(substream);
1416 if (push) {
1417 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1418 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1419 wake_up(&runtime->sleep);
1420 wake_up(&runtime->tsleep);
1421 } else {
1422 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1423 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1424 }
1425 }
1426
1427 static const struct action_ops snd_pcm_action_pause = {
1428 .pre_action = snd_pcm_pre_pause,
1429 .do_action = snd_pcm_do_pause,
1430 .undo_action = snd_pcm_undo_pause,
1431 .post_action = snd_pcm_post_pause
1432 };
1433
1434 /*
1435 * Push/release the pause for all linked streams.
1436 */
1437 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1438 {
1439 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1440 }
1441
1442 #ifdef CONFIG_PM
1443 /* suspend */
1444
1445 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1446 {
1447 struct snd_pcm_runtime *runtime = substream->runtime;
1448 switch (runtime->status->state) {
1449 case SNDRV_PCM_STATE_SUSPENDED:
1450 return -EBUSY;
1451 /* unresumable PCM state; return -EBUSY for skipping suspend */
1452 case SNDRV_PCM_STATE_OPEN:
1453 case SNDRV_PCM_STATE_SETUP:
1454 case SNDRV_PCM_STATE_DISCONNECTED:
1455 return -EBUSY;
1456 }
1457 runtime->trigger_master = substream;
1458 return 0;
1459 }
1460
1461 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1462 {
1463 struct snd_pcm_runtime *runtime = substream->runtime;
1464 if (runtime->trigger_master != substream)
1465 return 0;
1466 if (! snd_pcm_running(substream))
1467 return 0;
1468 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1469 return 0; /* suspend unconditionally */
1470 }
1471
1472 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1473 {
1474 struct snd_pcm_runtime *runtime = substream->runtime;
1475 snd_pcm_trigger_tstamp(substream);
1476 runtime->status->suspended_state = runtime->status->state;
1477 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1478 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1479 wake_up(&runtime->sleep);
1480 wake_up(&runtime->tsleep);
1481 }
1482
1483 static const struct action_ops snd_pcm_action_suspend = {
1484 .pre_action = snd_pcm_pre_suspend,
1485 .do_action = snd_pcm_do_suspend,
1486 .post_action = snd_pcm_post_suspend
1487 };
1488
1489 /*
1490 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1491 * @substream: the PCM substream
1492 *
1493 * After this call, all streams are changed to SUSPENDED state.
1494 *
1495 * Return: Zero if successful, or a negative error code.
1496 */
1497 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1498 {
1499 int err;
1500 unsigned long flags;
1501
1502 snd_pcm_stream_lock_irqsave(substream, flags);
1503 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1504 snd_pcm_stream_unlock_irqrestore(substream, flags);
1505 return err;
1506 }
1507
1508 /**
1509 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1510 * @pcm: the PCM instance
1511 *
1512 * After this call, all streams are changed to SUSPENDED state.
1513 *
1514 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1515 */
1516 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1517 {
1518 struct snd_pcm_substream *substream;
1519 int stream, err = 0;
1520
1521 if (! pcm)
1522 return 0;
1523
1524 for (stream = 0; stream < 2; stream++) {
1525 for (substream = pcm->streams[stream].substream;
1526 substream; substream = substream->next) {
1527 /* FIXME: the open/close code should lock this as well */
1528 if (substream->runtime == NULL)
1529 continue;
1530
1531 /*
1532 * Skip BE dai link PCMs that are internal and may
1533 * not have their substream ops set.
1534 */
1535 if (!substream->ops)
1536 continue;
1537
1538 err = snd_pcm_suspend(substream);
1539 if (err < 0 && err != -EBUSY)
1540 return err;
1541 }
1542 }
1543 return 0;
1544 }
1545 EXPORT_SYMBOL(snd_pcm_suspend_all);
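/*
 * A typical caller-side sketch: a driver's PM suspend handler suspends all
 * PCM substreams of its card before powering the hardware down (the chip
 * and pcm names here are illustrative only):
 *
 *   static int my_driver_suspend(struct device *dev)
 *   {
 *           struct my_chip *chip = dev_get_drvdata(dev);
 *
 *           snd_pcm_suspend_all(chip->pcm);
 *           ... save registers, power down ...
 *           return 0;
 *   }
 */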
1546
1547 /* resume */
1548
1549 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1550 {
1551 struct snd_pcm_runtime *runtime = substream->runtime;
1552 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1553 return -ENOSYS;
1554 runtime->trigger_master = substream;
1555 return 0;
1556 }
1557
1558 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1559 {
1560 struct snd_pcm_runtime *runtime = substream->runtime;
1561 if (runtime->trigger_master != substream)
1562 return 0;
1563 /* DMA not running previously? */
1564 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1565 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1566 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1567 return 0;
1568 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1569 }
1570
1571 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1572 {
1573 if (substream->runtime->trigger_master == substream &&
1574 snd_pcm_running(substream))
1575 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1576 }
1577
1578 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1579 {
1580 struct snd_pcm_runtime *runtime = substream->runtime;
1581 snd_pcm_trigger_tstamp(substream);
1582 runtime->status->state = runtime->status->suspended_state;
1583 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1584 }
1585
1586 static const struct action_ops snd_pcm_action_resume = {
1587 .pre_action = snd_pcm_pre_resume,
1588 .do_action = snd_pcm_do_resume,
1589 .undo_action = snd_pcm_undo_resume,
1590 .post_action = snd_pcm_post_resume
1591 };
1592
1593 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1594 {
1595 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1596 }
1597
1598 #else
1599
1600 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1601 {
1602 return -ENOSYS;
1603 }
1604
1605 #endif /* CONFIG_PM */
1606
1607 /*
1608 * xrun ioctl
1609 *
1610 * Change the RUNNING stream(s) to XRUN state.
1611 */
1612 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1613 {
1614 struct snd_pcm_runtime *runtime = substream->runtime;
1615 int result;
1616
1617 snd_pcm_stream_lock_irq(substream);
1618 switch (runtime->status->state) {
1619 case SNDRV_PCM_STATE_XRUN:
1620 result = 0; /* already there */
1621 break;
1622 case SNDRV_PCM_STATE_RUNNING:
1623 __snd_pcm_xrun(substream);
1624 result = 0;
1625 break;
1626 default:
1627 result = -EBADFD;
1628 }
1629 snd_pcm_stream_unlock_irq(substream);
1630 return result;
1631 }
1632
1633 /*
1634 * reset ioctl
1635 */
1636 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1637 {
1638 struct snd_pcm_runtime *runtime = substream->runtime;
1639 switch (runtime->status->state) {
1640 case SNDRV_PCM_STATE_RUNNING:
1641 case SNDRV_PCM_STATE_PREPARED:
1642 case SNDRV_PCM_STATE_PAUSED:
1643 case SNDRV_PCM_STATE_SUSPENDED:
1644 return 0;
1645 default:
1646 return -EBADFD;
1647 }
1648 }
1649
1650 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1651 {
1652 struct snd_pcm_runtime *runtime = substream->runtime;
1653 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1654 if (err < 0)
1655 return err;
1656 runtime->hw_ptr_base = 0;
1657 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1658 runtime->status->hw_ptr % runtime->period_size;
1659 runtime->silence_start = runtime->status->hw_ptr;
1660 runtime->silence_filled = 0;
1661 return 0;
1662 }
1663
1664 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1665 {
1666 struct snd_pcm_runtime *runtime = substream->runtime;
1667 runtime->control->appl_ptr = runtime->status->hw_ptr;
1668 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1669 runtime->silence_size > 0)
1670 snd_pcm_playback_silence(substream, ULONG_MAX);
1671 }
1672
1673 static const struct action_ops snd_pcm_action_reset = {
1674 .pre_action = snd_pcm_pre_reset,
1675 .do_action = snd_pcm_do_reset,
1676 .post_action = snd_pcm_post_reset
1677 };
1678
1679 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1680 {
1681 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1682 }
1683
1684 /*
1685 * prepare ioctl
1686 */
1687 /* we use the second argument for updating f_flags */
1688 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1689 int f_flags)
1690 {
1691 struct snd_pcm_runtime *runtime = substream->runtime;
1692 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1693 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1694 return -EBADFD;
1695 if (snd_pcm_running(substream))
1696 return -EBUSY;
1697 substream->f_flags = f_flags;
1698 return 0;
1699 }
1700
1701 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1702 {
1703 int err;
1704 err = substream->ops->prepare(substream);
1705 if (err < 0)
1706 return err;
1707 return snd_pcm_do_reset(substream, 0);
1708 }
1709
1710 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1711 {
1712 struct snd_pcm_runtime *runtime = substream->runtime;
1713 runtime->control->appl_ptr = runtime->status->hw_ptr;
1714 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1715 }
1716
1717 static const struct action_ops snd_pcm_action_prepare = {
1718 .pre_action = snd_pcm_pre_prepare,
1719 .do_action = snd_pcm_do_prepare,
1720 .post_action = snd_pcm_post_prepare
1721 };
1722
1723 /**
1724 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1725 * @substream: the PCM substream instance
1726 * @file: file to refer f_flags
1727 *
1728 * Return: Zero if successful, or a negative error code.
1729 */
1730 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1731 struct file *file)
1732 {
1733 int f_flags;
1734
1735 if (file)
1736 f_flags = file->f_flags;
1737 else
1738 f_flags = substream->f_flags;
1739
1740 snd_pcm_stream_lock_irq(substream);
1741 switch (substream->runtime->status->state) {
1742 case SNDRV_PCM_STATE_PAUSED:
1743 snd_pcm_pause(substream, 0);
1744 /* fallthru */
1745 case SNDRV_PCM_STATE_SUSPENDED:
1746 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1747 break;
1748 }
1749 snd_pcm_stream_unlock_irq(substream);
1750
1751 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1752 substream, f_flags);
1753 }
1754
1755 /*
1756 * drain ioctl
1757 */
1758
1759 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1760 {
1761 struct snd_pcm_runtime *runtime = substream->runtime;
1762 switch (runtime->status->state) {
1763 case SNDRV_PCM_STATE_OPEN:
1764 case SNDRV_PCM_STATE_DISCONNECTED:
1765 case SNDRV_PCM_STATE_SUSPENDED:
1766 return -EBADFD;
1767 }
1768 runtime->trigger_master = substream;
1769 return 0;
1770 }
1771
1772 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1773 {
1774 struct snd_pcm_runtime *runtime = substream->runtime;
1775 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1776 switch (runtime->status->state) {
1777 case SNDRV_PCM_STATE_PREPARED:
1778 /* start playback stream if possible */
1779 if (! snd_pcm_playback_empty(substream)) {
1780 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1781 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1782 } else {
1783 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1784 }
1785 break;
1786 case SNDRV_PCM_STATE_RUNNING:
1787 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1788 break;
1789 case SNDRV_PCM_STATE_XRUN:
1790 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1791 break;
1792 default:
1793 break;
1794 }
1795 } else {
1796 /* stop running stream */
1797 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1798 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1799 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1800 snd_pcm_do_stop(substream, new_state);
1801 snd_pcm_post_stop(substream, new_state);
1802 }
1803 }
1804
1805 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1806 runtime->trigger_master == substream &&
1807 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1808 return substream->ops->trigger(substream,
1809 SNDRV_PCM_TRIGGER_DRAIN);
1810
1811 return 0;
1812 }
1813
1814 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1815 {
1816 }
1817
1818 static const struct action_ops snd_pcm_action_drain_init = {
1819 .pre_action = snd_pcm_pre_drain_init,
1820 .do_action = snd_pcm_do_drain_init,
1821 .post_action = snd_pcm_post_drain_init
1822 };
1823
1824 /*
1825 * Drain the stream(s).
1826 * When the substream is linked, sync until the draining of all playback streams
1827 * is finished.
1828 * After this call, all streams are supposed to be either SETUP or DRAINING
1829 * (capture only) state.
1830 */
1831 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1832 struct file *file)
1833 {
1834 struct snd_card *card;
1835 struct snd_pcm_runtime *runtime;
1836 struct snd_pcm_substream *s;
1837 struct snd_pcm_group *group;
1838 wait_queue_entry_t wait;
1839 int result = 0;
1840 int nonblock = 0;
1841
1842 card = substream->pcm->card;
1843 runtime = substream->runtime;
1844
1845 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1846 return -EBADFD;
1847
1848 if (file) {
1849 if (file->f_flags & O_NONBLOCK)
1850 nonblock = 1;
1851 } else if (substream->f_flags & O_NONBLOCK)
1852 nonblock = 1;
1853
1854 snd_pcm_stream_lock_irq(substream);
1855 /* resume pause */
1856 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1857 snd_pcm_pause(substream, 0);
1858
1859 /* pre-start/stop - all running streams are changed to DRAINING state */
1860 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1861 if (result < 0)
1862 goto unlock;
1863 /* in non-blocking mode, we don't wait in the ioctl but let the caller poll */
1864 if (nonblock) {
1865 result = -EAGAIN;
1866 goto unlock;
1867 }
1868
1869 for (;;) {
1870 long tout;
1871 struct snd_pcm_runtime *to_check;
1872 if (signal_pending(current)) {
1873 result = -ERESTARTSYS;
1874 break;
1875 }
1876 /* find a substream to drain */
1877 to_check = NULL;
1878 group = snd_pcm_stream_group_ref(substream);
1879 snd_pcm_group_for_each_entry(s, substream) {
1880 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1881 continue;
1882 runtime = s->runtime;
1883 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1884 to_check = runtime;
1885 break;
1886 }
1887 }
1888 snd_pcm_group_unref(group, substream);
1889 if (!to_check)
1890 break; /* all drained */
1891 init_waitqueue_entry(&wait, current);
1892 add_wait_queue(&to_check->sleep, &wait);
1893 snd_pcm_stream_unlock_irq(substream);
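		/* sleep for up to two period times (at least 10 seconds);
		 * without period wakeups we can only wait indefinitely
		 */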
1894 if (runtime->no_period_wakeup)
1895 tout = MAX_SCHEDULE_TIMEOUT;
1896 else {
1897 tout = 10;
1898 if (runtime->rate) {
1899 long t = runtime->period_size * 2 / runtime->rate;
1900 tout = max(t, tout);
1901 }
1902 tout = msecs_to_jiffies(tout * 1000);
1903 }
1904 tout = schedule_timeout_interruptible(tout);
1905
1906 snd_pcm_stream_lock_irq(substream);
1907 group = snd_pcm_stream_group_ref(substream);
1908 snd_pcm_group_for_each_entry(s, substream) {
1909 if (s->runtime == to_check) {
1910 remove_wait_queue(&to_check->sleep, &wait);
1911 break;
1912 }
1913 }
1914 snd_pcm_group_unref(group, substream);
1915
1916 if (card->shutdown) {
1917 result = -ENODEV;
1918 break;
1919 }
1920 if (tout == 0) {
1921 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1922 result = -ESTRPIPE;
1923 else {
1924 dev_dbg(substream->pcm->card->dev,
1925 "playback drain error (DMA or IRQ trouble?)\n");
1926 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1927 result = -EIO;
1928 }
1929 break;
1930 }
1931 }
1932
1933 unlock:
1934 snd_pcm_stream_unlock_irq(substream);
1935
1936 return result;
1937 }
1938
1939 /*
1940 * drop ioctl
1941 *
1942 * Immediately put all linked substreams into SETUP state.
1943 */
1944 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1945 {
1946 struct snd_pcm_runtime *runtime;
1947 int result = 0;
1948
1949 if (PCM_RUNTIME_CHECK(substream))
1950 return -ENXIO;
1951 runtime = substream->runtime;
1952
1953 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1954 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1955 return -EBADFD;
1956
1957 snd_pcm_stream_lock_irq(substream);
1958 /* resume pause */
1959 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1960 snd_pcm_pause(substream, 0);
1961
1962 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1963 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1964 snd_pcm_stream_unlock_irq(substream);
1965
1966 return result;
1967 }
1968
1969
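/*
 * Check whether the file refers to one of our PCM character devices; used by
 * the link ioctl below to validate the file descriptor passed from user space.
 */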
1970 static bool is_pcm_file(struct file *file)
1971 {
1972 struct inode *inode = file_inode(file);
1973 struct snd_pcm *pcm;
1974 unsigned int minor;
1975
1976 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1977 return false;
1978 minor = iminor(inode);
1979 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
1980 if (!pcm)
1981 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1982 if (!pcm)
1983 return false;
1984 snd_card_unref(pcm->card);
1985 return true;
1986 }
1987
1988 /*
1989 * PCM link handling
1990 */
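/*
 * Linked substreams share one snd_pcm_group, so a trigger on any member is
 * applied to all of them.  A new group is allocated up front here and simply
 * freed again at the end if this substream already belonged to a group.
 */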
1991 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1992 {
1993 int res = 0;
1994 struct snd_pcm_file *pcm_file;
1995 struct snd_pcm_substream *substream1;
1996 struct snd_pcm_group *group, *target_group;
1997 bool nonatomic = substream->pcm->nonatomic;
1998 struct fd f = fdget(fd);
1999
2000 if (!f.file)
2001 return -EBADFD;
2002 if (!is_pcm_file(f.file)) {
2003 res = -EBADFD;
2004 goto _badf;
2005 }
2006 pcm_file = f.file->private_data;
2007 substream1 = pcm_file->substream;
2008 group = kzalloc(sizeof(*group), GFP_KERNEL);
2009 if (!group) {
2010 res = -ENOMEM;
2011 goto _nolock;
2012 }
2013 snd_pcm_group_init(group);
2014
2015 down_write(&snd_pcm_link_rwsem);
2016 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2017 substream->runtime->status->state != substream1->runtime->status->state ||
2018 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
2019 res = -EBADFD;
2020 goto _end;
2021 }
2022 if (snd_pcm_stream_linked(substream1)) {
2023 res = -EALREADY;
2024 goto _end;
2025 }
2026
2027 snd_pcm_stream_lock_irq(substream);
2028 if (!snd_pcm_stream_linked(substream)) {
2029 snd_pcm_group_assign(substream, group);
2030 group = NULL; /* assigned, don't free this one below */
2031 }
2032 target_group = substream->group;
2033 snd_pcm_stream_unlock_irq(substream);
2034
2035 snd_pcm_group_lock_irq(target_group, nonatomic);
2036 snd_pcm_stream_lock(substream1);
2037 snd_pcm_group_assign(substream1, target_group);
2038 snd_pcm_stream_unlock(substream1);
2039 snd_pcm_group_unlock_irq(target_group, nonatomic);
2040 _end:
2041 up_write(&snd_pcm_link_rwsem);
2042 _nolock:
2043 kfree(group);
2044 _badf:
2045 fdput(f);
2046 return res;
2047 }
2048
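/* detach the substream from the shared group and point it back at its own self_group */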
2049 static void relink_to_local(struct snd_pcm_substream *substream)
2050 {
2051 snd_pcm_stream_lock(substream);
2052 snd_pcm_group_assign(substream, &substream->self_group);
2053 snd_pcm_stream_unlock(substream);
2054 }
2055
2056 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2057 {
2058 struct snd_pcm_group *group;
2059 bool nonatomic = substream->pcm->nonatomic;
2060 bool do_free = false;
2061 int res = 0;
2062
2063 down_write(&snd_pcm_link_rwsem);
2064
2065 if (!snd_pcm_stream_linked(substream)) {
2066 res = -EALREADY;
2067 goto _end;
2068 }
2069
2070 group = substream->group;
2071 snd_pcm_group_lock_irq(group, nonatomic);
2072
2073 relink_to_local(substream);
2074
2075 /* detach the last stream, too */
2076 if (list_is_singular(&group->substreams)) {
2077 relink_to_local(list_first_entry(&group->substreams,
2078 struct snd_pcm_substream,
2079 link_list));
2080 do_free = !refcount_read(&group->refs);
2081 }
2082
2083 snd_pcm_group_unlock_irq(group, nonatomic);
2084 if (do_free)
2085 kfree(group);
2086
2087 _end:
2088 up_write(&snd_pcm_link_rwsem);
2089 return res;
2090 }
2091
2092 /*
2093 * hw configurator
2094 */
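/*
 * Generic refinement rules: each helper below recomputes one hw_params
 * interval from one or two dependent intervals (product, quotient, or a
 * scaled variant using the constant passed via rule->private) and refines
 * the target interval with the result; e.g. FRAME_BITS = SAMPLE_BITS *
 * CHANNELS is installed with snd_pcm_hw_rule_mul in
 * snd_pcm_hw_constraints_init() below.
 */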
2095 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2096 struct snd_pcm_hw_rule *rule)
2097 {
2098 struct snd_interval t;
2099 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2100 hw_param_interval_c(params, rule->deps[1]), &t);
2101 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2102 }
2103
2104 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2105 struct snd_pcm_hw_rule *rule)
2106 {
2107 struct snd_interval t;
2108 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2109 hw_param_interval_c(params, rule->deps[1]), &t);
2110 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2111 }
2112
2113 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2114 struct snd_pcm_hw_rule *rule)
2115 {
2116 struct snd_interval t;
2117 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2118 hw_param_interval_c(params, rule->deps[1]),
2119 (unsigned long) rule->private, &t);
2120 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2121 }
2122
2123 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2124 struct snd_pcm_hw_rule *rule)
2125 {
2126 struct snd_interval t;
2127 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2128 (unsigned long) rule->private,
2129 hw_param_interval_c(params, rule->deps[1]), &t);
2130 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2131 }
2132
2133 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2134 struct snd_pcm_hw_rule *rule)
2135 {
2136 unsigned int k;
2137 const struct snd_interval *i =
2138 hw_param_interval_c(params, rule->deps[0]);
2139 struct snd_mask m;
2140 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2141 snd_mask_any(&m);
2142 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2143 int bits;
2144 if (! snd_mask_test(mask, k))
2145 continue;
2146 bits = snd_pcm_format_physical_width(k);
2147 if (bits <= 0)
2148 continue; /* ignore invalid formats */
2149 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2150 snd_mask_reset(&m, k);
2151 }
2152 return snd_mask_refine(mask, &m);
2153 }
2154
2155 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2156 struct snd_pcm_hw_rule *rule)
2157 {
2158 struct snd_interval t;
2159 unsigned int k;
2160 t.min = UINT_MAX;
2161 t.max = 0;
2162 t.openmin = 0;
2163 t.openmax = 0;
2164 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2165 int bits;
2166 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2167 continue;
2168 bits = snd_pcm_format_physical_width(k);
2169 if (bits <= 0)
2170 continue; /* ignore invalid formats */
2171 if (t.min > (unsigned)bits)
2172 t.min = bits;
2173 if (t.max < (unsigned)bits)
2174 t.max = bits;
2175 }
2176 t.integer = 1;
2177 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2178 }
2179
2180 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2181 #error "Change this table"
2182 #endif
2183
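/* the index into this table corresponds to the SNDRV_PCM_RATE_* bit position,
 * as asserted by the preprocessor check above
 */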
2184 static const unsigned int rates[] = {
2185 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2186 48000, 64000, 88200, 96000, 176400, 192000
2187 };
2188
2189 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2190 .count = ARRAY_SIZE(rates),
2191 .list = rates,
2192 };
2193
2194 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2195 struct snd_pcm_hw_rule *rule)
2196 {
2197 struct snd_pcm_hardware *hw = rule->private;
2198 return snd_interval_list(hw_param_interval(params, rule->var),
2199 snd_pcm_known_rates.count,
2200 snd_pcm_known_rates.list, hw->rates);
2201 }
2202
2203 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2204 struct snd_pcm_hw_rule *rule)
2205 {
2206 struct snd_interval t;
2207 struct snd_pcm_substream *substream = rule->private;
2208 t.min = 0;
2209 t.max = substream->buffer_bytes_max;
2210 t.openmin = 0;
2211 t.openmax = 0;
2212 t.integer = 1;
2213 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2214 }
2215
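/*
 * Install the generic constraints and the dependency rules that tie sample
 * bits, frame bits, channels, rate, and the period/buffer sizes, bytes and
 * times together; called when a substream is opened.
 */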
2216 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2217 {
2218 struct snd_pcm_runtime *runtime = substream->runtime;
2219 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2220 int k, err;
2221
2222 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2223 snd_mask_any(constrs_mask(constrs, k));
2224 }
2225
2226 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2227 snd_interval_any(constrs_interval(constrs, k));
2228 }
2229
2230 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2231 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2232 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2233 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2234 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2235
2236 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2237 snd_pcm_hw_rule_format, NULL,
2238 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2239 if (err < 0)
2240 return err;
2241 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2242 snd_pcm_hw_rule_sample_bits, NULL,
2243 SNDRV_PCM_HW_PARAM_FORMAT,
2244 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2245 if (err < 0)
2246 return err;
2247 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2248 snd_pcm_hw_rule_div, NULL,
2249 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2250 if (err < 0)
2251 return err;
2252 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2253 snd_pcm_hw_rule_mul, NULL,
2254 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2255 if (err < 0)
2256 return err;
2257 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2258 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2259 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2260 if (err < 0)
2261 return err;
2262 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2263 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2264 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2265 if (err < 0)
2266 return err;
2267 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2268 snd_pcm_hw_rule_div, NULL,
2269 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2270 if (err < 0)
2271 return err;
2272 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2273 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2274 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2275 if (err < 0)
2276 return err;
2277 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2278 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2279 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2280 if (err < 0)
2281 return err;
2282 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2283 snd_pcm_hw_rule_div, NULL,
2284 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2285 if (err < 0)
2286 return err;
2287 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2288 snd_pcm_hw_rule_div, NULL,
2289 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2290 if (err < 0)
2291 return err;
2292 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2293 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2294 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2295 if (err < 0)
2296 return err;
2297 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2298 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2299 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2300 if (err < 0)
2301 return err;
2302 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2303 snd_pcm_hw_rule_mul, NULL,
2304 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2305 if (err < 0)
2306 return err;
2307 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2308 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2309 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2310 if (err < 0)
2311 return err;
2312 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2313 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2314 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2315 if (err < 0)
2316 return err;
2317 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2318 snd_pcm_hw_rule_muldivk, (void*) 8,
2319 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2320 if (err < 0)
2321 return err;
2322 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2323 snd_pcm_hw_rule_muldivk, (void*) 8,
2324 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2325 if (err < 0)
2326 return err;
2327 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2328 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2329 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2330 if (err < 0)
2331 return err;
2332 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2333 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2334 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2335 if (err < 0)
2336 return err;
2337 return 0;
2338 }
2339
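/*
 * Apply the limits announced by the driver in runtime->hw (access types,
 * formats, channels, rates, period/buffer bytes) on top of the generic
 * rules; called after the low-level open callback has filled runtime->hw.
 */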
2340 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2341 {
2342 struct snd_pcm_runtime *runtime = substream->runtime;
2343 struct snd_pcm_hardware *hw = &runtime->hw;
2344 int err;
2345 unsigned int mask = 0;
2346
2347 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2348 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2349 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2350 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2351 if (hw_support_mmap(substream)) {
2352 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2353 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2354 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2355 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2356 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2357 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2358 }
2359 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2360 if (err < 0)
2361 return err;
2362
2363 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2364 if (err < 0)
2365 return err;
2366
2367 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2368 if (err < 0)
2369 return err;
2370
2371 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2372 hw->channels_min, hw->channels_max);
2373 if (err < 0)
2374 return err;
2375
2376 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2377 hw->rate_min, hw->rate_max);
2378 if (err < 0)
2379 return err;
2380
2381 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2382 hw->period_bytes_min, hw->period_bytes_max);
2383 if (err < 0)
2384 return err;
2385
2386 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2387 hw->periods_min, hw->periods_max);
2388 if (err < 0)
2389 return err;
2390
2391 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2392 hw->period_bytes_min, hw->buffer_bytes_max);
2393 if (err < 0)
2394 return err;
2395
2396 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2397 snd_pcm_hw_rule_buffer_bytes_max, substream,
2398 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2399 if (err < 0)
2400 return err;
2401
2402 /* FIXME: remove */
2403 if (runtime->dma_bytes) {
2404 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2405 if (err < 0)
2406 return err;
2407 }
2408
2409 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2410 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2411 snd_pcm_hw_rule_rate, hw,
2412 SNDRV_PCM_HW_PARAM_RATE, -1);
2413 if (err < 0)
2414 return err;
2415 }
2416
2417 /* FIXME: this belongs to the lowlevel driver */
2418 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2419
2420 return 0;
2421 }
2422
2423 static void pcm_release_private(struct snd_pcm_substream *substream)
2424 {
2425 if (snd_pcm_stream_linked(substream))
2426 snd_pcm_unlink(substream);
2427 }
2428
2429 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2430 {
2431 substream->ref_count--;
2432 if (substream->ref_count > 0)
2433 return;
2434
2435 snd_pcm_drop(substream);
2436 if (substream->hw_opened) {
2437 if (substream->ops->hw_free &&
2438 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2439 substream->ops->hw_free(substream);
2440 substream->ops->close(substream);
2441 substream->hw_opened = 0;
2442 }
2443 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2444 pm_qos_remove_request(&substream->latency_pm_qos_req);
2445 if (substream->pcm_release) {
2446 substream->pcm_release(substream);
2447 substream->pcm_release = NULL;
2448 }
2449 snd_pcm_detach_substream(substream);
2450 }
2451 EXPORT_SYMBOL(snd_pcm_release_substream);
2452
2453 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2454 struct file *file,
2455 struct snd_pcm_substream **rsubstream)
2456 {
2457 struct snd_pcm_substream *substream;
2458 int err;
2459
2460 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2461 if (err < 0)
2462 return err;
2463 if (substream->ref_count > 1) {
2464 *rsubstream = substream;
2465 return 0;
2466 }
2467
2468 err = snd_pcm_hw_constraints_init(substream);
2469 if (err < 0) {
2470 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2471 goto error;
2472 }
2473
2474 if ((err = substream->ops->open(substream)) < 0)
2475 goto error;
2476
2477 substream->hw_opened = 1;
2478
2479 err = snd_pcm_hw_constraints_complete(substream);
2480 if (err < 0) {
2481 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2482 goto error;
2483 }
2484
2485 *rsubstream = substream;
2486 return 0;
2487
2488 error:
2489 snd_pcm_release_substream(substream);
2490 return err;
2491 }
2492 EXPORT_SYMBOL(snd_pcm_open_substream);
2493
2494 static int snd_pcm_open_file(struct file *file,
2495 struct snd_pcm *pcm,
2496 int stream)
2497 {
2498 struct snd_pcm_file *pcm_file;
2499 struct snd_pcm_substream *substream;
2500 int err;
2501
2502 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2503 if (err < 0)
2504 return err;
2505
2506 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2507 if (pcm_file == NULL) {
2508 snd_pcm_release_substream(substream);
2509 return -ENOMEM;
2510 }
2511 pcm_file->substream = substream;
2512 if (substream->ref_count == 1)
2513 substream->pcm_release = pcm_release_private;
2514 file->private_data = pcm_file;
2515
2516 return 0;
2517 }
2518
2519 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2520 {
2521 struct snd_pcm *pcm;
2522 int err = nonseekable_open(inode, file);
2523 if (err < 0)
2524 return err;
2525 pcm = snd_lookup_minor_data(iminor(inode),
2526 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2527 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2528 if (pcm)
2529 snd_card_unref(pcm->card);
2530 return err;
2531 }
2532
2533 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2534 {
2535 struct snd_pcm *pcm;
2536 int err = nonseekable_open(inode, file);
2537 if (err < 0)
2538 return err;
2539 pcm = snd_lookup_minor_data(iminor(inode),
2540 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2541 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2542 if (pcm)
2543 snd_card_unref(pcm->card);
2544 return err;
2545 }
2546
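/*
 * Open one substream of the given direction.  If no substream is currently
 * free, block on pcm->open_wait until one is released, unless the file was
 * opened with O_NONBLOCK, in which case -EBUSY is returned.
 */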
2547 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2548 {
2549 int err;
2550 wait_queue_entry_t wait;
2551
2552 if (pcm == NULL) {
2553 err = -ENODEV;
2554 goto __error1;
2555 }
2556 err = snd_card_file_add(pcm->card, file);
2557 if (err < 0)
2558 goto __error1;
2559 if (!try_module_get(pcm->card->module)) {
2560 err = -EFAULT;
2561 goto __error2;
2562 }
2563 init_waitqueue_entry(&wait, current);
2564 add_wait_queue(&pcm->open_wait, &wait);
2565 mutex_lock(&pcm->open_mutex);
2566 while (1) {
2567 err = snd_pcm_open_file(file, pcm, stream);
2568 if (err >= 0)
2569 break;
2570 if (err == -EAGAIN) {
2571 if (file->f_flags & O_NONBLOCK) {
2572 err = -EBUSY;
2573 break;
2574 }
2575 } else
2576 break;
2577 set_current_state(TASK_INTERRUPTIBLE);
2578 mutex_unlock(&pcm->open_mutex);
2579 schedule();
2580 mutex_lock(&pcm->open_mutex);
2581 if (pcm->card->shutdown) {
2582 err = -ENODEV;
2583 break;
2584 }
2585 if (signal_pending(current)) {
2586 err = -ERESTARTSYS;
2587 break;
2588 }
2589 }
2590 remove_wait_queue(&pcm->open_wait, &wait);
2591 mutex_unlock(&pcm->open_mutex);
2592 if (err < 0)
2593 goto __error;
2594 return err;
2595
2596 __error:
2597 module_put(pcm->card->module);
2598 __error2:
2599 snd_card_file_remove(pcm->card, file);
2600 __error1:
2601 return err;
2602 }
2603
2604 static int snd_pcm_release(struct inode *inode, struct file *file)
2605 {
2606 struct snd_pcm *pcm;
2607 struct snd_pcm_substream *substream;
2608 struct snd_pcm_file *pcm_file;
2609
2610 pcm_file = file->private_data;
2611 substream = pcm_file->substream;
2612 if (snd_BUG_ON(!substream))
2613 return -ENXIO;
2614 pcm = substream->pcm;
2615 mutex_lock(&pcm->open_mutex);
2616 snd_pcm_release_substream(substream);
2617 kfree(pcm_file);
2618 mutex_unlock(&pcm->open_mutex);
2619 wake_up(&pcm->open_wait);
2620 module_put(pcm->card->module);
2621 snd_card_file_remove(pcm->card, file);
2622 return 0;
2623 }
2624
2625 /* check and update PCM state; return 0 or a negative error
2626 * call this inside the PCM stream lock
2627 */
2628 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2629 {
2630 switch (substream->runtime->status->state) {
2631 case SNDRV_PCM_STATE_DRAINING:
2632 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2633 return -EBADFD;
2634 /* Fall through */
2635 case SNDRV_PCM_STATE_RUNNING:
2636 return snd_pcm_update_hw_ptr(substream);
2637 case SNDRV_PCM_STATE_PREPARED:
2638 case SNDRV_PCM_STATE_PAUSED:
2639 return 0;
2640 case SNDRV_PCM_STATE_SUSPENDED:
2641 return -ESTRPIPE;
2642 case SNDRV_PCM_STATE_XRUN:
2643 return -EPIPE;
2644 default:
2645 return -EBADFD;
2646 }
2647 }
2648
2649 /* increase the appl_ptr; returns the processed frames or a negative error */
2650 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2651 snd_pcm_uframes_t frames,
2652 snd_pcm_sframes_t avail)
2653 {
2654 struct snd_pcm_runtime *runtime = substream->runtime;
2655 snd_pcm_sframes_t appl_ptr;
2656 int ret;
2657
2658 if (avail <= 0)
2659 return 0;
2660 if (frames > (snd_pcm_uframes_t)avail)
2661 frames = avail;
2662 appl_ptr = runtime->control->appl_ptr + frames;
2663 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2664 appl_ptr -= runtime->boundary;
2665 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2666 return ret < 0 ? ret : frames;
2667 }
2668
2669 /* decrease the appl_ptr; returns the processed frames or zero for error */
2670 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2671 snd_pcm_uframes_t frames,
2672 snd_pcm_sframes_t avail)
2673 {
2674 struct snd_pcm_runtime *runtime = substream->runtime;
2675 snd_pcm_sframes_t appl_ptr;
2676 int ret;
2677
2678 if (avail <= 0)
2679 return 0;
2680 if (frames > (snd_pcm_uframes_t)avail)
2681 frames = avail;
2682 appl_ptr = runtime->control->appl_ptr - frames;
2683 if (appl_ptr < 0)
2684 appl_ptr += runtime->boundary;
2685 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2686 /* NOTE: we return zero on error because PulseAudio gives up and stops
2687 * all further processing as soon as the rewind ioctl returns an error.
2688 * Returning zero means that no rewind was done, so it is not an
2689 * entirely wrong answer.
2690 */
2691 return ret < 0 ? 0 : frames;
2692 }
2693
2694 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2695 snd_pcm_uframes_t frames)
2696 {
2697 snd_pcm_sframes_t ret;
2698
2699 if (frames == 0)
2700 return 0;
2701
2702 snd_pcm_stream_lock_irq(substream);
2703 ret = do_pcm_hwsync(substream);
2704 if (!ret)
2705 ret = rewind_appl_ptr(substream, frames,
2706 snd_pcm_hw_avail(substream));
2707 snd_pcm_stream_unlock_irq(substream);
2708 return ret;
2709 }
2710
2711 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2712 snd_pcm_uframes_t frames)
2713 {
2714 snd_pcm_sframes_t ret;
2715
2716 if (frames == 0)
2717 return 0;
2718
2719 snd_pcm_stream_lock_irq(substream);
2720 ret = do_pcm_hwsync(substream);
2721 if (!ret)
2722 ret = forward_appl_ptr(substream, frames,
2723 snd_pcm_avail(substream));
2724 snd_pcm_stream_unlock_irq(substream);
2725 return ret;
2726 }
2727
2728 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2729 {
2730 int err;
2731
2732 snd_pcm_stream_lock_irq(substream);
2733 err = do_pcm_hwsync(substream);
2734 snd_pcm_stream_unlock_irq(substream);
2735 return err;
2736 }
2737
2738 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2739 snd_pcm_sframes_t *delay)
2740 {
2741 int err;
2742 snd_pcm_sframes_t n = 0;
2743
2744 snd_pcm_stream_lock_irq(substream);
2745 err = do_pcm_hwsync(substream);
2746 if (!err)
2747 n = snd_pcm_calc_delay(substream);
2748 snd_pcm_stream_unlock_irq(substream);
2749 if (!err)
2750 *delay = n;
2751 return err;
2752 }
2753
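/*
 * SYNC_PTR ioctl: transfer the mmap control record (appl_ptr, avail_min)
 * from user space unless the corresponding SNDRV_PCM_SYNC_PTR_* flag is set,
 * and always return a snapshot of the mmap status record; an hwsync is done
 * first when SNDRV_PCM_SYNC_PTR_HWSYNC is requested.
 */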
2754 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2755 struct snd_pcm_sync_ptr __user *_sync_ptr)
2756 {
2757 struct snd_pcm_runtime *runtime = substream->runtime;
2758 struct snd_pcm_sync_ptr sync_ptr;
2759 volatile struct snd_pcm_mmap_status *status;
2760 volatile struct snd_pcm_mmap_control *control;
2761 int err;
2762
2763 memset(&sync_ptr, 0, sizeof(sync_ptr));
2764 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2765 return -EFAULT;
2766 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2767 return -EFAULT;
2768 status = runtime->status;
2769 control = runtime->control;
2770 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2771 err = snd_pcm_hwsync(substream);
2772 if (err < 0)
2773 return err;
2774 }
2775 snd_pcm_stream_lock_irq(substream);
2776 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2777 err = pcm_lib_apply_appl_ptr(substream,
2778 sync_ptr.c.control.appl_ptr);
2779 if (err < 0) {
2780 snd_pcm_stream_unlock_irq(substream);
2781 return err;
2782 }
2783 } else {
2784 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2785 }
2786 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2787 control->avail_min = sync_ptr.c.control.avail_min;
2788 else
2789 sync_ptr.c.control.avail_min = control->avail_min;
2790 sync_ptr.s.status.state = status->state;
2791 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2792 sync_ptr.s.status.tstamp = status->tstamp;
2793 sync_ptr.s.status.suspended_state = status->suspended_state;
2794 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2795 snd_pcm_stream_unlock_irq(substream);
2796 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2797 return -EFAULT;
2798 return 0;
2799 }
2800
2801 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2802 {
2803 struct snd_pcm_runtime *runtime = substream->runtime;
2804 int arg;
2805
2806 if (get_user(arg, _arg))
2807 return -EFAULT;
2808 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2809 return -EINVAL;
2810 runtime->tstamp_type = arg;
2811 return 0;
2812 }
2813
2814 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2815 struct snd_xferi __user *_xferi)
2816 {
2817 struct snd_xferi xferi;
2818 struct snd_pcm_runtime *runtime = substream->runtime;
2819 snd_pcm_sframes_t result;
2820
2821 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2822 return -EBADFD;
2823 if (put_user(0, &_xferi->result))
2824 return -EFAULT;
2825 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2826 return -EFAULT;
2827 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2828 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2829 else
2830 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2831 __put_user(result, &_xferi->result);
2832 return result < 0 ? result : 0;
2833 }
2834
2835 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2836 struct snd_xfern __user *_xfern)
2837 {
2838 struct snd_xfern xfern;
2839 struct snd_pcm_runtime *runtime = substream->runtime;
2840 void *bufs;
2841 snd_pcm_sframes_t result;
2842
2843 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2844 return -EBADFD;
2845 if (runtime->channels > 128)
2846 return -EINVAL;
2847 if (put_user(0, &_xfern->result))
2848 return -EFAULT;
2849 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2850 return -EFAULT;
2851
2852 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2853 if (IS_ERR(bufs))
2854 return PTR_ERR(bufs);
2855 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2856 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2857 else
2858 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2859 kfree(bufs);
2860 __put_user(result, &_xfern->result);
2861 return result < 0 ? result : 0;
2862 }
2863
2864 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2865 snd_pcm_uframes_t __user *_frames)
2866 {
2867 snd_pcm_uframes_t frames;
2868 snd_pcm_sframes_t result;
2869
2870 if (get_user(frames, _frames))
2871 return -EFAULT;
2872 if (put_user(0, _frames))
2873 return -EFAULT;
2874 result = snd_pcm_rewind(substream, frames);
2875 __put_user(result, _frames);
2876 return result < 0 ? result : 0;
2877 }
2878
2879 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2880 snd_pcm_uframes_t __user *_frames)
2881 {
2882 snd_pcm_uframes_t frames;
2883 snd_pcm_sframes_t result;
2884
2885 if (get_user(frames, _frames))
2886 return -EFAULT;
2887 if (put_user(0, _frames))
2888 return -EFAULT;
2889 result = snd_pcm_forward(substream, frames);
2890 __put_user(result, _frames);
2891 return result < 0 ? result : 0;
2892 }
2893
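/*
 * Handler for the native PCM ioctls; verifies that a runtime is attached and
 * waits for the card to reach the D0 power state before dispatching the
 * individual commands.
 */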
2894 static int snd_pcm_common_ioctl(struct file *file,
2895 struct snd_pcm_substream *substream,
2896 unsigned int cmd, void __user *arg)
2897 {
2898 struct snd_pcm_file *pcm_file = file->private_data;
2899 int res;
2900
2901 if (PCM_RUNTIME_CHECK(substream))
2902 return -ENXIO;
2903
2904 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2905 if (res < 0)
2906 return res;
2907
2908 switch (cmd) {
2909 case SNDRV_PCM_IOCTL_PVERSION:
2910 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2911 case SNDRV_PCM_IOCTL_INFO:
2912 return snd_pcm_info_user(substream, arg);
2913 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2914 return 0;
2915 case SNDRV_PCM_IOCTL_TTSTAMP:
2916 return snd_pcm_tstamp(substream, arg);
2917 case SNDRV_PCM_IOCTL_USER_PVERSION:
2918 if (get_user(pcm_file->user_pversion,
2919 (unsigned int __user *)arg))
2920 return -EFAULT;
2921 return 0;
2922 case SNDRV_PCM_IOCTL_HW_REFINE:
2923 return snd_pcm_hw_refine_user(substream, arg);
2924 case SNDRV_PCM_IOCTL_HW_PARAMS:
2925 return snd_pcm_hw_params_user(substream, arg);
2926 case SNDRV_PCM_IOCTL_HW_FREE:
2927 return snd_pcm_hw_free(substream);
2928 case SNDRV_PCM_IOCTL_SW_PARAMS:
2929 return snd_pcm_sw_params_user(substream, arg);
2930 case SNDRV_PCM_IOCTL_STATUS:
2931 return snd_pcm_status_user(substream, arg, false);
2932 case SNDRV_PCM_IOCTL_STATUS_EXT:
2933 return snd_pcm_status_user(substream, arg, true);
2934 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2935 return snd_pcm_channel_info_user(substream, arg);
2936 case SNDRV_PCM_IOCTL_PREPARE:
2937 return snd_pcm_prepare(substream, file);
2938 case SNDRV_PCM_IOCTL_RESET:
2939 return snd_pcm_reset(substream);
2940 case SNDRV_PCM_IOCTL_START:
2941 return snd_pcm_start_lock_irq(substream);
2942 case SNDRV_PCM_IOCTL_LINK:
2943 return snd_pcm_link(substream, (int)(unsigned long) arg);
2944 case SNDRV_PCM_IOCTL_UNLINK:
2945 return snd_pcm_unlink(substream);
2946 case SNDRV_PCM_IOCTL_RESUME:
2947 return snd_pcm_resume(substream);
2948 case SNDRV_PCM_IOCTL_XRUN:
2949 return snd_pcm_xrun(substream);
2950 case SNDRV_PCM_IOCTL_HWSYNC:
2951 return snd_pcm_hwsync(substream);
2952 case SNDRV_PCM_IOCTL_DELAY:
2953 {
2954 snd_pcm_sframes_t delay;
2955 snd_pcm_sframes_t __user *res = arg;
2956 int err;
2957
2958 err = snd_pcm_delay(substream, &delay);
2959 if (err)
2960 return err;
2961 if (put_user(delay, res))
2962 return -EFAULT;
2963 return 0;
2964 }
2965 case SNDRV_PCM_IOCTL_SYNC_PTR:
2966 return snd_pcm_sync_ptr(substream, arg);
2967 #ifdef CONFIG_SND_SUPPORT_OLD_API
2968 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2969 return snd_pcm_hw_refine_old_user(substream, arg);
2970 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2971 return snd_pcm_hw_params_old_user(substream, arg);
2972 #endif
2973 case SNDRV_PCM_IOCTL_DRAIN:
2974 return snd_pcm_drain(substream, file);
2975 case SNDRV_PCM_IOCTL_DROP:
2976 return snd_pcm_drop(substream);
2977 case SNDRV_PCM_IOCTL_PAUSE:
2978 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2979 substream,
2980 (int)(unsigned long)arg);
2981 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2982 case SNDRV_PCM_IOCTL_READI_FRAMES:
2983 return snd_pcm_xferi_frames_ioctl(substream, arg);
2984 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2985 case SNDRV_PCM_IOCTL_READN_FRAMES:
2986 return snd_pcm_xfern_frames_ioctl(substream, arg);
2987 case SNDRV_PCM_IOCTL_REWIND:
2988 return snd_pcm_rewind_ioctl(substream, arg);
2989 case SNDRV_PCM_IOCTL_FORWARD:
2990 return snd_pcm_forward_ioctl(substream, arg);
2991 }
2992 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2993 return -ENOTTY;
2994 }
2995
2996 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
2997 unsigned long arg)
2998 {
2999 struct snd_pcm_file *pcm_file;
3000
3001 pcm_file = file->private_data;
3002
3003 if (((cmd >> 8) & 0xff) != 'A')
3004 return -ENOTTY;
3005
3006 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
3007 (void __user *)arg);
3008 }
3009
3010 /**
3011 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3012 * @substream: PCM substream
3013 * @cmd: IOCTL cmd
3014 * @arg: IOCTL argument
3015 *
3016 * The function is provided primarily for the OSS layer and USB gadget drivers,
3017 * and it allows only a limited set of ioctls (hw_params, sw_params,
3018 * prepare, start, drain, drop, forward, delay).
3019 */
3020 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3021 unsigned int cmd, void *arg)
3022 {
3023 snd_pcm_uframes_t *frames = arg;
3024 snd_pcm_sframes_t result;
3025
3026 switch (cmd) {
3027 case SNDRV_PCM_IOCTL_FORWARD:
3028 {
3029 /* provided only for OSS; capture-only and no value returned */
3030 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3031 return -EINVAL;
3032 result = snd_pcm_forward(substream, *frames);
3033 return result < 0 ? result : 0;
3034 }
3035 case SNDRV_PCM_IOCTL_HW_PARAMS:
3036 return snd_pcm_hw_params(substream, arg);
3037 case SNDRV_PCM_IOCTL_SW_PARAMS:
3038 return snd_pcm_sw_params(substream, arg);
3039 case SNDRV_PCM_IOCTL_PREPARE:
3040 return snd_pcm_prepare(substream, NULL);
3041 case SNDRV_PCM_IOCTL_START:
3042 return snd_pcm_start_lock_irq(substream);
3043 case SNDRV_PCM_IOCTL_DRAIN:
3044 return snd_pcm_drain(substream, NULL);
3045 case SNDRV_PCM_IOCTL_DROP:
3046 return snd_pcm_drop(substream);
3047 case SNDRV_PCM_IOCTL_DELAY:
3048 return snd_pcm_delay(substream, frames);
3049 default:
3050 return -EINVAL;
3051 }
3052 }
3053 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
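/*
 * A minimal usage sketch (hypothetical in-kernel caller, such as the OSS
 * emulation layer) with a valid substream:
 *
 *	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
 *	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay);
 *
 * where "delay" is a snd_pcm_sframes_t variable receiving the current delay.
 */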
3054
3055 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3056 loff_t * offset)
3057 {
3058 struct snd_pcm_file *pcm_file;
3059 struct snd_pcm_substream *substream;
3060 struct snd_pcm_runtime *runtime;
3061 snd_pcm_sframes_t result;
3062
3063 pcm_file = file->private_data;
3064 substream = pcm_file->substream;
3065 if (PCM_RUNTIME_CHECK(substream))
3066 return -ENXIO;
3067 runtime = substream->runtime;
3068 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3069 return -EBADFD;
3070 if (!frame_aligned(runtime, count))
3071 return -EINVAL;
3072 count = bytes_to_frames(runtime, count);
3073 result = snd_pcm_lib_read(substream, buf, count);
3074 if (result > 0)
3075 result = frames_to_bytes(runtime, result);
3076 return result;
3077 }
3078
3079 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3080 size_t count, loff_t * offset)
3081 {
3082 struct snd_pcm_file *pcm_file;
3083 struct snd_pcm_substream *substream;
3084 struct snd_pcm_runtime *runtime;
3085 snd_pcm_sframes_t result;
3086
3087 pcm_file = file->private_data;
3088 substream = pcm_file->substream;
3089 if (PCM_RUNTIME_CHECK(substream))
3090 return -ENXIO;
3091 runtime = substream->runtime;
3092 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3093 return -EBADFD;
3094 if (!frame_aligned(runtime, count))
3095 return -EINVAL;
3096 count = bytes_to_frames(runtime, count);
3097 result = snd_pcm_lib_write(substream, buf, count);
3098 if (result > 0)
3099 result = frames_to_bytes(runtime, result);
3100 return result;
3101 }
3102
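/*
 * Vectored read/write: the handlers below expect one iovec per channel
 * (non-interleaved access); the segment length, which must be frame-aligned,
 * determines how many frames are transferred per channel.
 */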
3103 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3104 {
3105 struct snd_pcm_file *pcm_file;
3106 struct snd_pcm_substream *substream;
3107 struct snd_pcm_runtime *runtime;
3108 snd_pcm_sframes_t result;
3109 unsigned long i;
3110 void __user **bufs;
3111 snd_pcm_uframes_t frames;
3112
3113 pcm_file = iocb->ki_filp->private_data;
3114 substream = pcm_file->substream;
3115 if (PCM_RUNTIME_CHECK(substream))
3116 return -ENXIO;
3117 runtime = substream->runtime;
3118 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3119 return -EBADFD;
3120 if (!iter_is_iovec(to))
3121 return -EINVAL;
3122 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3123 return -EINVAL;
3124 if (!frame_aligned(runtime, to->iov->iov_len))
3125 return -EINVAL;
3126 frames = bytes_to_samples(runtime, to->iov->iov_len);
3127 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3128 if (bufs == NULL)
3129 return -ENOMEM;
3130 for (i = 0; i < to->nr_segs; ++i)
3131 bufs[i] = to->iov[i].iov_base;
3132 result = snd_pcm_lib_readv(substream, bufs, frames);
3133 if (result > 0)
3134 result = frames_to_bytes(runtime, result);
3135 kfree(bufs);
3136 return result;
3137 }
3138
3139 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3140 {
3141 struct snd_pcm_file *pcm_file;
3142 struct snd_pcm_substream *substream;
3143 struct snd_pcm_runtime *runtime;
3144 snd_pcm_sframes_t result;
3145 unsigned long i;
3146 void __user **bufs;
3147 snd_pcm_uframes_t frames;
3148
3149 pcm_file = iocb->ki_filp->private_data;
3150 substream = pcm_file->substream;
3151 if (PCM_RUNTIME_CHECK(substream))
3152 return -ENXIO;
3153 runtime = substream->runtime;
3154 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3155 return -EBADFD;
3156 if (!iter_is_iovec(from))
3157 return -EINVAL;
3158 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3159 !frame_aligned(runtime, from->iov->iov_len))
3160 return -EINVAL;
3161 frames = bytes_to_samples(runtime, from->iov->iov_len);
3162 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3163 if (bufs == NULL)
3164 return -ENOMEM;
3165 for (i = 0; i < from->nr_segs; ++i)
3166 bufs[i] = from->iov[i].iov_base;
3167 result = snd_pcm_lib_writev(substream, bufs, frames);
3168 if (result > 0)
3169 result = frames_to_bytes(runtime, result);
3170 kfree(bufs);
3171 return result;
3172 }
3173
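/*
 * poll/select handler: a playback fd reports EPOLLOUT/EPOLLWRNORM and a
 * capture fd reports EPOLLIN/EPOLLRDNORM once at least avail_min frames can
 * be transferred; error conditions are signalled with EPOLLERR.
 */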
3174 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3175 {
3176 struct snd_pcm_file *pcm_file;
3177 struct snd_pcm_substream *substream;
3178 struct snd_pcm_runtime *runtime;
3179 __poll_t mask, ok;
3180 snd_pcm_uframes_t avail;
3181
3182 pcm_file = file->private_data;
3183
3184 substream = pcm_file->substream;
3185 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3186 ok = EPOLLOUT | EPOLLWRNORM;
3187 else
3188 ok = EPOLLIN | EPOLLRDNORM;
3189 if (PCM_RUNTIME_CHECK(substream))
3190 return ok | EPOLLERR;
3191
3192 runtime = substream->runtime;
3193 poll_wait(file, &runtime->sleep, wait);
3194
3195 mask = 0;
3196 snd_pcm_stream_lock_irq(substream);
3197 avail = snd_pcm_avail(substream);
3198 switch (runtime->status->state) {
3199 case SNDRV_PCM_STATE_RUNNING:
3200 case SNDRV_PCM_STATE_PREPARED:
3201 case SNDRV_PCM_STATE_PAUSED:
3202 if (avail >= runtime->control->avail_min)
3203 mask = ok;
3204 break;
3205 case SNDRV_PCM_STATE_DRAINING:
3206 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3207 mask = ok;
3208 if (!avail)
3209 mask |= EPOLLERR;
3210 }
3211 break;
3212 default:
3213 mask = ok | EPOLLERR;
3214 break;
3215 }
3216 snd_pcm_stream_unlock_irq(substream);
3217 return mask;
3218 }
3219
3220 /*
3221 * mmap support
3222 */
3223
3224 /*
3225 * Only on cache-coherent architectures can we mmap the status and the control
3226 * records for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3227 */
3228 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3229 /*
3230 * mmap status record
3231 */
3232 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3233 {
3234 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3235 struct snd_pcm_runtime *runtime;
3236
3237 if (substream == NULL)
3238 return VM_FAULT_SIGBUS;
3239 runtime = substream->runtime;
3240 vmf->page = virt_to_page(runtime->status);
3241 get_page(vmf->page);
3242 return 0;
3243 }
3244
3245 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3246 {
3247 .fault = snd_pcm_mmap_status_fault,
3248 };
3249
3250 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3251 struct vm_area_struct *area)
3252 {
3253 long size;
3254 if (!(area->vm_flags & VM_READ))
3255 return -EINVAL;
3256 size = area->vm_end - area->vm_start;
3257 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3258 return -EINVAL;
3259 area->vm_ops = &snd_pcm_vm_ops_status;
3260 area->vm_private_data = substream;
3261 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3262 return 0;
3263 }
3264
3265 /*
3266 * mmap control record
3267 */
3268 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3269 {
3270 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3271 struct snd_pcm_runtime *runtime;
3272
3273 if (substream == NULL)
3274 return VM_FAULT_SIGBUS;
3275 runtime = substream->runtime;
3276 vmf->page = virt_to_page(runtime->control);
3277 get_page(vmf->page);
3278 return 0;
3279 }
3280
3281 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3282 {
3283 .fault = snd_pcm_mmap_control_fault,
3284 };
3285
3286 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3287 struct vm_area_struct *area)
3288 {
3289 long size;
3290 if (!(area->vm_flags & VM_READ))
3291 return -EINVAL;
3292 size = area->vm_end - area->vm_start;
3293 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3294 return -EINVAL;
3295 area->vm_ops = &snd_pcm_vm_ops_control;
3296 area->vm_private_data = substream;
3297 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3298 return 0;
3299 }
3300
3301 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3302 {
3303 if (pcm_file->no_compat_mmap)
3304 return false;
3305 /* See pcm_control_mmap_allowed() below.
3306 * Since older alsa-lib requires both status and control mmaps to be
3307 * coupled, we have to disable the status mmap for old alsa-lib, too.
3308 */
3309 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3310 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3311 return false;
3312 return true;
3313 }
3314
3315 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3316 {
3317 if (pcm_file->no_compat_mmap)
3318 return false;
3319 /* Disallow the control mmap when the SYNC_APPLPTR flag is set;
3320 * this forces user-space to fall back to snd_pcm_sync_ptr(),
3321 * which effectively assures the manual update of appl_ptr.
3322 */
3323 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3324 return false;
3325 return true;
3326 }
3327
3328 #else /* ! coherent mmap */
3329 /*
3330 * don't support mmap for status and control records.
3331 */
3332 #define pcm_status_mmap_allowed(pcm_file) false
3333 #define pcm_control_mmap_allowed(pcm_file) false
3334
3335 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3336 struct vm_area_struct *area)
3337 {
3338 return -ENXIO;
3339 }
3340 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3341 struct vm_area_struct *area)
3342 {
3343 return -ENXIO;
3344 }
3345 #endif /* coherent mmap */
3346
3347 static inline struct page *
3348 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3349 {
3350 void *vaddr = substream->runtime->dma_area + ofs;
3351 return virt_to_page(vaddr);
3352 }
3353
3354 /*
3355 * fault callback for mmapping a RAM page
3356 */
3357 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3358 {
3359 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3360 struct snd_pcm_runtime *runtime;
3361 unsigned long offset;
3362 struct page * page;
3363 size_t dma_bytes;
3364
3365 if (substream == NULL)
3366 return VM_FAULT_SIGBUS;
3367 runtime = substream->runtime;
3368 offset = vmf->pgoff << PAGE_SHIFT;
3369 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3370 if (offset > dma_bytes - PAGE_SIZE)
3371 return VM_FAULT_SIGBUS;
3372 if (substream->ops->page)
3373 page = substream->ops->page(substream, offset);
3374 else
3375 page = snd_pcm_default_page_ops(substream, offset);
3376 if (!page)
3377 return VM_FAULT_SIGBUS;
3378 get_page(page);
3379 vmf->page = page;
3380 return 0;
3381 }
3382
3383 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3384 .open = snd_pcm_mmap_data_open,
3385 .close = snd_pcm_mmap_data_close,
3386 };
3387
3388 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3389 .open = snd_pcm_mmap_data_open,
3390 .close = snd_pcm_mmap_data_close,
3391 .fault = snd_pcm_mmap_data_fault,
3392 };
3393
3394 /*
3395 * mmap the DMA buffer on RAM
3396 */
3397
3398 /**
3399 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3400 * @substream: PCM substream
3401 * @area: VMA
3402 *
3403 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3404 * this function is invoked implicitly.
3405 */
3406 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3407 struct vm_area_struct *area)
3408 {
3409 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3410 #ifdef CONFIG_GENERIC_ALLOCATOR
3411 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3412 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3413 return remap_pfn_range(area, area->vm_start,
3414 substream->dma_buffer.addr >> PAGE_SHIFT,
3415 area->vm_end - area->vm_start, area->vm_page_prot);
3416 }
3417 #endif /* CONFIG_GENERIC_ALLOCATOR */
3418 #ifndef CONFIG_X86 /* to avoid warnings from arch/x86/mm/pat.c */
3419 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3420 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3421 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3422 area,
3423 substream->runtime->dma_area,
3424 substream->runtime->dma_addr,
3425 substream->runtime->dma_bytes);
3426 #endif /* CONFIG_X86 */
3427 /* mmap with fault handler */
3428 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3429 return 0;
3430 }
3431 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
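/*
 * A hypothetical driver would normally leave the mmap pcm_ops unset so that
 * this default is picked up automatically, but it may also reference the
 * helper explicitly, e.g.:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_default_mmap,
 *	};
 */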
3432
3433 /*
3434 * mmap the DMA buffer on I/O memory area
3435 */
3436 #if SNDRV_PCM_INFO_MMAP_IOMEM
3437 /**
3438 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3439 * @substream: PCM substream
3440 * @area: VMA
3441 *
3442 * When your hardware uses the iomapped pages as the hardware buffer and
3443 * wants to mmap it, pass this function as mmap pcm_ops. Note that this
3444 * is supposed to work only on limited architectures.
3445 */
3446 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3447 struct vm_area_struct *area)
3448 {
3449 struct snd_pcm_runtime *runtime = substream->runtime;
3450
3451 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3452 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3453 }
3454 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3455 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3456
3457 /*
3458 * mmap DMA buffer
3459 */
3460 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3461 struct vm_area_struct *area)
3462 {
3463 struct snd_pcm_runtime *runtime;
3464 long size;
3465 unsigned long offset;
3466 size_t dma_bytes;
3467 int err;
3468
3469 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3470 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3471 return -EINVAL;
3472 } else {
3473 if (!(area->vm_flags & VM_READ))
3474 return -EINVAL;
3475 }
3476 runtime = substream->runtime;
3477 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3478 return -EBADFD;
3479 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3480 return -ENXIO;
3481 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3482 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3483 return -EINVAL;
3484 size = area->vm_end - area->vm_start;
3485 offset = area->vm_pgoff << PAGE_SHIFT;
3486 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3487 if ((size_t)size > dma_bytes)
3488 return -EINVAL;
3489 if (offset > dma_bytes - size)
3490 return -EINVAL;
3491
3492 area->vm_ops = &snd_pcm_vm_ops_data;
3493 area->vm_private_data = substream;
3494 if (substream->ops->mmap)
3495 err = substream->ops->mmap(substream, area);
3496 else
3497 err = snd_pcm_lib_default_mmap(substream, area);
3498 if (!err)
3499 atomic_inc(&substream->mmap_count);
3500 return err;
3501 }
3502 EXPORT_SYMBOL(snd_pcm_mmap_data);
3503
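/*
 * mmap entry point: the mmap offset selects which area is mapped, i.e. the
 * status record, the control record, or (for any other offset) the DMA data
 * buffer itself.
 */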
3504 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3505 {
3506 struct snd_pcm_file * pcm_file;
3507 struct snd_pcm_substream *substream;
3508 unsigned long offset;
3509
3510 pcm_file = file->private_data;
3511 substream = pcm_file->substream;
3512 if (PCM_RUNTIME_CHECK(substream))
3513 return -ENXIO;
3514
3515 offset = area->vm_pgoff << PAGE_SHIFT;
3516 switch (offset) {
3517 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3518 if (!pcm_status_mmap_allowed(pcm_file))
3519 return -ENXIO;
3520 return snd_pcm_mmap_status(substream, file, area);
3521 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3522 if (!pcm_control_mmap_allowed(pcm_file))
3523 return -ENXIO;
3524 return snd_pcm_mmap_control(substream, file, area);
3525 default:
3526 return snd_pcm_mmap_data(substream, file, area);
3527 }
3528 return 0;
3529 }
3530
3531 static int snd_pcm_fasync(int fd, struct file * file, int on)
3532 {
3533 struct snd_pcm_file * pcm_file;
3534 struct snd_pcm_substream *substream;
3535 struct snd_pcm_runtime *runtime;
3536
3537 pcm_file = file->private_data;
3538 substream = pcm_file->substream;
3539 if (PCM_RUNTIME_CHECK(substream))
3540 return -ENXIO;
3541 runtime = substream->runtime;
3542 return fasync_helper(fd, file, on, &runtime->fasync);
3543 }
3544
3545 /*
3546 * ioctl32 compat
3547 */
3548 #ifdef CONFIG_COMPAT
3549 #include "pcm_compat.c"
3550 #else
3551 #define snd_pcm_ioctl_compat NULL
3552 #endif
3553
3554 /*
3555 * Helpers kept only for binary compatibility with the old API; to be removed
3556 */
3557
3558 #ifdef CONFIG_SND_SUPPORT_OLD_API
3559 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3560 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
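/*
 * The old ABI numbered the hw_params variables differently: the macros above
 * keep the three lowest mask bits in place and shift all remaining bits by 5
 * positions to translate rmask/cmask between the old and the current
 * parameter numbering (an interpretation of the bit manipulation they do).
 */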
3561
3562 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3563 struct snd_pcm_hw_params_old *oparams)
3564 {
3565 unsigned int i;
3566
3567 memset(params, 0, sizeof(*params));
3568 params->flags = oparams->flags;
3569 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3570 params->masks[i].bits[0] = oparams->masks[i];
3571 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3572 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3573 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3574 params->info = oparams->info;
3575 params->msbits = oparams->msbits;
3576 params->rate_num = oparams->rate_num;
3577 params->rate_den = oparams->rate_den;
3578 params->fifo_size = oparams->fifo_size;
3579 }
3580
3581 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3582 struct snd_pcm_hw_params *params)
3583 {
3584 unsigned int i;
3585
3586 memset(oparams, 0, sizeof(*oparams));
3587 oparams->flags = params->flags;
3588 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3589 oparams->masks[i] = params->masks[i].bits[0];
3590 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3591 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3592 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3593 oparams->info = params->info;
3594 oparams->msbits = params->msbits;
3595 oparams->rate_num = params->rate_num;
3596 oparams->rate_den = params->rate_den;
3597 oparams->fifo_size = params->fifo_size;
3598 }
3599
3600 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3601 struct snd_pcm_hw_params_old __user * _oparams)
3602 {
3603 struct snd_pcm_hw_params *params;
3604 struct snd_pcm_hw_params_old *oparams = NULL;
3605 int err;
3606
3607 params = kmalloc(sizeof(*params), GFP_KERNEL);
3608 if (!params)
3609 return -ENOMEM;
3610
3611 oparams = memdup_user(_oparams, sizeof(*oparams));
3612 if (IS_ERR(oparams)) {
3613 err = PTR_ERR(oparams);
3614 goto out;
3615 }
3616 snd_pcm_hw_convert_from_old_params(params, oparams);
3617 err = snd_pcm_hw_refine(substream, params);
3618 if (err < 0)
3619 goto out_old;
3620
3621 err = fixup_unreferenced_params(substream, params);
3622 if (err < 0)
3623 goto out_old;
3624
3625 snd_pcm_hw_convert_to_old_params(oparams, params);
3626 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3627 err = -EFAULT;
3628 out_old:
3629 kfree(oparams);
3630 out:
3631 kfree(params);
3632 return err;
3633 }
3634
3635 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3636 struct snd_pcm_hw_params_old __user * _oparams)
3637 {
3638 struct snd_pcm_hw_params *params;
3639 struct snd_pcm_hw_params_old *oparams = NULL;
3640 int err;
3641
3642 params = kmalloc(sizeof(*params), GFP_KERNEL);
3643 if (!params)
3644 return -ENOMEM;
3645
3646 oparams = memdup_user(_oparams, sizeof(*oparams));
3647 if (IS_ERR(oparams)) {
3648 err = PTR_ERR(oparams);
3649 goto out;
3650 }
3651
3652 snd_pcm_hw_convert_from_old_params(params, oparams);
3653 err = snd_pcm_hw_params(substream, params);
3654 if (err < 0)
3655 goto out_old;
3656
3657 snd_pcm_hw_convert_to_old_params(oparams, params);
3658 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3659 err = -EFAULT;
3660 out_old:
3661 kfree(oparams);
3662 out:
3663 kfree(params);
3664 return err;
3665 }
3666 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3667
3668 #ifndef CONFIG_MMU
3669 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3670 unsigned long addr,
3671 unsigned long len,
3672 unsigned long pgoff,
3673 unsigned long flags)
3674 {
3675 struct snd_pcm_file *pcm_file = file->private_data;
3676 struct snd_pcm_substream *substream = pcm_file->substream;
3677 struct snd_pcm_runtime *runtime = substream->runtime;
3678 unsigned long offset = pgoff << PAGE_SHIFT;
3679
3680 switch (offset) {
3681 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3682 return (unsigned long)runtime->status;
3683 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3684 return (unsigned long)runtime->control;
3685 default:
3686 return (unsigned long)runtime->dma_area + offset;
3687 }
3688 }
3689 #else
3690 # define snd_pcm_get_unmapped_area NULL
3691 #endif
3692
3693 /*
3694 * Register section
3695 */
3696
3697 const struct file_operations snd_pcm_f_ops[2] = {
3698 {
3699 .owner = THIS_MODULE,
3700 .write = snd_pcm_write,
3701 .write_iter = snd_pcm_writev,
3702 .open = snd_pcm_playback_open,
3703 .release = snd_pcm_release,
3704 .llseek = no_llseek,
3705 .poll = snd_pcm_poll,
3706 .unlocked_ioctl = snd_pcm_ioctl,
3707 .compat_ioctl = snd_pcm_ioctl_compat,
3708 .mmap = snd_pcm_mmap,
3709 .fasync = snd_pcm_fasync,
3710 .get_unmapped_area = snd_pcm_get_unmapped_area,
3711 },
3712 {
3713 .owner = THIS_MODULE,
3714 .read = snd_pcm_read,
3715 .read_iter = snd_pcm_readv,
3716 .open = snd_pcm_capture_open,
3717 .release = snd_pcm_release,
3718 .llseek = no_llseek,
3719 .poll = snd_pcm_poll,
3720 .unlocked_ioctl = snd_pcm_ioctl,
3721 .compat_ioctl = snd_pcm_ioctl_compat,
3722 .mmap = snd_pcm_mmap,
3723 .fasync = snd_pcm_fasync,
3724 .get_unmapped_area = snd_pcm_get_unmapped_area,
3725 }
3726 };