sound/core/pcm_native.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 */
6
7 #include <linux/mm.h>
8 #include <linux/module.h>
9 #include <linux/file.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/time.h>
13 #include <linux/pm_qos.h>
14 #include <linux/io.h>
15 #include <linux/dma-mapping.h>
16 #include <sound/core.h>
17 #include <sound/control.h>
18 #include <sound/info.h>
19 #include <sound/pcm.h>
20 #include <sound/pcm_params.h>
21 #include <sound/timer.h>
22 #include <sound/minors.h>
23 #include <linux/uio.h>
24 #include <linux/delay.h>
25
26 #include "pcm_local.h"
27
28 #ifdef CONFIG_SND_DEBUG
29 #define CREATE_TRACE_POINTS
30 #include "pcm_param_trace.h"
31 #else
32 #define trace_hw_mask_param_enabled() 0
33 #define trace_hw_interval_param_enabled() 0
34 #define trace_hw_mask_param(substream, type, index, prev, curr)
35 #define trace_hw_interval_param(substream, type, index, prev, curr)
36 #endif
37
38 /*
39 * Compatibility
40 */
41
42 struct snd_pcm_hw_params_old {
43 unsigned int flags;
44 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
45 SNDRV_PCM_HW_PARAM_ACCESS + 1];
46 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
47 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
48 unsigned int rmask;
49 unsigned int cmask;
50 unsigned int info;
51 unsigned int msbits;
52 unsigned int rate_num;
53 unsigned int rate_den;
54 snd_pcm_uframes_t fifo_size;
55 unsigned char reserved[64];
56 };
57
58 #ifdef CONFIG_SND_SUPPORT_OLD_API
59 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
60 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
61
62 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
63 struct snd_pcm_hw_params_old __user * _oparams);
64 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
65 struct snd_pcm_hw_params_old __user * _oparams);
66 #endif
67 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
68
69 /*
70 *
71 */
72
73 static DECLARE_RWSEM(snd_pcm_link_rwsem);
74
75 void snd_pcm_group_init(struct snd_pcm_group *group)
76 {
77 spin_lock_init(&group->lock);
78 mutex_init(&group->mutex);
79 INIT_LIST_HEAD(&group->substreams);
80 refcount_set(&group->refs, 0);
81 }
82
83 /* define group lock helpers */
84 #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
85 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
86 { \
87 if (nonatomic) \
88 mutex_ ## mutex_action(&group->mutex); \
89 else \
90 spin_ ## action(&group->lock); \
91 }
92
93 DEFINE_PCM_GROUP_LOCK(lock, lock);
94 DEFINE_PCM_GROUP_LOCK(unlock, unlock);
95 DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
96 DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
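/*
 * Illustration (not part of the original file): DEFINE_PCM_GROUP_LOCK(lock, lock)
 * above expands to roughly the following helper, which picks the group's mutex
 * or spinlock depending on the nonatomic flag passed by the caller:
 *
 *	static void snd_pcm_group_lock(struct snd_pcm_group *group, bool nonatomic)
 *	{
 *		if (nonatomic)
 *			mutex_lock(&group->mutex);
 *		else
 *			spin_lock(&group->lock);
 *	}
 */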
97
98 /**
99 * snd_pcm_stream_lock - Lock the PCM stream
100 * @substream: PCM substream
101 *
102 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
103 * flag of the given substream. This also takes the global link rw lock
104 * flag of the given substream. It also takes the global link rw lock
105 * (or rw sem) to avoid a race with linked streams.
106 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
107 {
108 snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
109 }
110 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
111
112 /**
113 * snd_pcm_stream_unlock - Unlock the PCM stream
114 * @substream: PCM substream
115 *
116 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
117 */
118 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
119 {
120 snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
121 }
122 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
123
124 /**
125 * snd_pcm_stream_lock_irq - Lock the PCM stream
126 * @substream: PCM substream
127 *
128 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
129 * IRQ (only when nonatomic is false). In the nonatomic case, this is
130 * identical to snd_pcm_stream_lock().
131 */
132 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
133 {
134 snd_pcm_group_lock_irq(&substream->self_group,
135 substream->pcm->nonatomic);
136 }
137 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
138
139 /**
140 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
141 * @substream: PCM substream
142 *
143 * This is a counter-part of snd_pcm_stream_lock_irq().
144 */
145 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
146 {
147 snd_pcm_group_unlock_irq(&substream->self_group,
148 substream->pcm->nonatomic);
149 }
150 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
151
152 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
153 {
154 unsigned long flags = 0;
155 if (substream->pcm->nonatomic)
156 mutex_lock(&substream->self_group.mutex);
157 else
158 spin_lock_irqsave(&substream->self_group.lock, flags);
159 return flags;
160 }
161 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
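/*
 * Usage sketch (illustrative, not part of the original file): callers normally
 * do not invoke _snd_pcm_stream_lock_irqsave() directly but go through the
 * snd_pcm_stream_lock_irqsave() wrapper macro, pairing it with
 * snd_pcm_stream_unlock_irqrestore(), as snd_pcm_stop_xrun() does below:
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	... operate on the stream under the lock ...
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 */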
162
163 /**
164 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
165 * @substream: PCM substream
166 * @flags: irq flags
167 *
168 * This is a counter-part of snd_pcm_stream_lock_irqsave().
169 */
170 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
171 unsigned long flags)
172 {
173 if (substream->pcm->nonatomic)
174 mutex_unlock(&substream->self_group.mutex);
175 else
176 spin_unlock_irqrestore(&substream->self_group.lock, flags);
177 }
178 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
179
180 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
181 {
182 struct snd_pcm *pcm = substream->pcm;
183 struct snd_pcm_str *pstr = substream->pstr;
184
185 memset(info, 0, sizeof(*info));
186 info->card = pcm->card->number;
187 info->device = pcm->device;
188 info->stream = substream->stream;
189 info->subdevice = substream->number;
190 strlcpy(info->id, pcm->id, sizeof(info->id));
191 strlcpy(info->name, pcm->name, sizeof(info->name));
192 info->dev_class = pcm->dev_class;
193 info->dev_subclass = pcm->dev_subclass;
194 info->subdevices_count = pstr->substream_count;
195 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
196 strlcpy(info->subname, substream->name, sizeof(info->subname));
197
198 return 0;
199 }
200
201 int snd_pcm_info_user(struct snd_pcm_substream *substream,
202 struct snd_pcm_info __user * _info)
203 {
204 struct snd_pcm_info *info;
205 int err;
206
207 info = kmalloc(sizeof(*info), GFP_KERNEL);
208 if (! info)
209 return -ENOMEM;
210 err = snd_pcm_info(substream, info);
211 if (err >= 0) {
212 if (copy_to_user(_info, info, sizeof(*info)))
213 err = -EFAULT;
214 }
215 kfree(info);
216 return err;
217 }
218
219 static bool hw_support_mmap(struct snd_pcm_substream *substream)
220 {
221 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
222 return false;
223 /* architecture supports dma_mmap_coherent()? */
224 #if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
225 if (!substream->ops->mmap &&
226 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
227 return false;
228 #endif
229 return true;
230 }
231
232 static int constrain_mask_params(struct snd_pcm_substream *substream,
233 struct snd_pcm_hw_params *params)
234 {
235 struct snd_pcm_hw_constraints *constrs =
236 &substream->runtime->hw_constraints;
237 struct snd_mask *m;
238 unsigned int k;
239 struct snd_mask old_mask;
240 int changed;
241
242 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
243 m = hw_param_mask(params, k);
244 if (snd_mask_empty(m))
245 return -EINVAL;
246
247 		/* The caller did not request a change to this parameter; skip it. */
248 if (!(params->rmask & (1 << k)))
249 continue;
250
251 if (trace_hw_mask_param_enabled())
252 old_mask = *m;
253
254 changed = snd_mask_refine(m, constrs_mask(constrs, k));
255 if (changed < 0)
256 return changed;
257 if (changed == 0)
258 continue;
259
260 		/* Set the corresponding flag so that the caller sees the change. */
261 trace_hw_mask_param(substream, k, 0, &old_mask, m);
262 params->cmask |= 1 << k;
263 }
264
265 return 0;
266 }
267
268 static int constrain_interval_params(struct snd_pcm_substream *substream,
269 struct snd_pcm_hw_params *params)
270 {
271 struct snd_pcm_hw_constraints *constrs =
272 &substream->runtime->hw_constraints;
273 struct snd_interval *i;
274 unsigned int k;
275 struct snd_interval old_interval;
276 int changed;
277
278 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
279 i = hw_param_interval(params, k);
280 if (snd_interval_empty(i))
281 return -EINVAL;
282
283 		/* The caller did not request a change to this parameter; skip it. */
284 if (!(params->rmask & (1 << k)))
285 continue;
286
287 if (trace_hw_interval_param_enabled())
288 old_interval = *i;
289
290 changed = snd_interval_refine(i, constrs_interval(constrs, k));
291 if (changed < 0)
292 return changed;
293 if (changed == 0)
294 continue;
295
296 		/* Set the corresponding flag so that the caller sees the change. */
297 trace_hw_interval_param(substream, k, 0, &old_interval, i);
298 params->cmask |= 1 << k;
299 }
300
301 return 0;
302 }
303
304 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
305 struct snd_pcm_hw_params *params)
306 {
307 struct snd_pcm_hw_constraints *constrs =
308 &substream->runtime->hw_constraints;
309 unsigned int k;
310 unsigned int *rstamps;
311 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
312 unsigned int stamp;
313 struct snd_pcm_hw_rule *r;
314 unsigned int d;
315 struct snd_mask old_mask;
316 struct snd_interval old_interval;
317 bool again;
318 int changed, err = 0;
319
320 /*
321 * Each application of a rule gets its own sequence number.
322 *
323 * Each member of the 'rstamps' array holds the sequence number of the
324 * most recent application of the corresponding rule.
325 */
326 rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
327 if (!rstamps)
328 return -ENOMEM;
329
330 /*
331 * Each member of the 'vstamps' array holds the sequence number of the
332 * most recent rule application in which the corresponding parameter was
333 * changed.
334 *
335 * Initially, the entries for parameters requested by the caller are set
336 * to 1; entries for unrequested parameters stay 0 so those parameters
337 * are never changed.
338 */
339 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
340 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
341
342 	/* Due to the above design, actual sequence numbers start at 2. */
343 stamp = 2;
344 retry:
345 /* Apply all rules in order. */
346 again = false;
347 for (k = 0; k < constrs->rules_num; k++) {
348 r = &constrs->rules[k];
349
350 /*
351 		 * Check the condition bits of this rule. A rule with
352 		 * condition bits is applied only when at least one of
353 		 * those bits is set in the requested flags.
354 		 * SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP is an example.
355 */
356 if (r->cond && !(r->cond & params->flags))
357 continue;
358
359 /*
360 		 * The 'deps' array lists at most three SNDRV_PCM_HW_PARAM_XXX
361 		 * dependencies for this rule; the list is terminated by a
362 		 * sentinel entry holding a negative value.
363 		 *
364 		 * The rule is processed in this pass only when one of its
365 		 * dependent parameters was changed by an earlier application
366 		 * of another rule, as tracked by the vstamps/rstamps sequence
367 		 * numbers above.
368 */
369 for (d = 0; r->deps[d] >= 0; d++) {
370 if (vstamps[r->deps[d]] > rstamps[k])
371 break;
372 }
373 if (r->deps[d] < 0)
374 continue;
375
376 if (trace_hw_mask_param_enabled()) {
377 if (hw_is_mask(r->var))
378 old_mask = *hw_param_mask(params, r->var);
379 }
380 if (trace_hw_interval_param_enabled()) {
381 if (hw_is_interval(r->var))
382 old_interval = *hw_param_interval(params, r->var);
383 }
384
385 changed = r->func(params, r);
386 if (changed < 0) {
387 err = changed;
388 goto out;
389 }
390
391 /*
392 		 * When the parameter was changed, report it to the caller via
393 		 * the corresponding bit in cmask, then prepare for the next
394 		 * iteration.
395 */
396 if (changed && r->var >= 0) {
397 if (hw_is_mask(r->var)) {
398 trace_hw_mask_param(substream, r->var,
399 k + 1, &old_mask,
400 hw_param_mask(params, r->var));
401 }
402 if (hw_is_interval(r->var)) {
403 trace_hw_interval_param(substream, r->var,
404 k + 1, &old_interval,
405 hw_param_interval(params, r->var));
406 }
407
408 params->cmask |= (1 << r->var);
409 vstamps[r->var] = stamp;
410 again = true;
411 }
412
413 rstamps[k] = stamp++;
414 }
415
416 	/* Iterate to evaluate all rules until no parameter is changed. */
417 if (again)
418 goto retry;
419
420 out:
421 kfree(rstamps);
422 return err;
423 }
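/*
 * Worked example (illustrative, not part of the original file): suppose only
 * RATE is requested (vstamps[RATE] = 1, everything else 0) and rule R depends
 * on RATE with rstamps[R] = 0.  In the first pass, vstamps[RATE] > rstamps[R],
 * so R is applied and gets rstamps[R] = 2.  If R then narrows, say,
 * PERIOD_SIZE, vstamps[PERIOD_SIZE] is set to the same stamp, which forces any
 * rule depending on PERIOD_SIZE to run in the next pass.  The loop retries
 * until a full pass changes nothing.
 */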
424
425 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
426 struct snd_pcm_hw_params *params)
427 {
428 const struct snd_interval *i;
429 const struct snd_mask *m;
430 int err;
431
432 if (!params->msbits) {
433 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
434 if (snd_interval_single(i))
435 params->msbits = snd_interval_value(i);
436 }
437
438 if (!params->rate_den) {
439 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
440 if (snd_interval_single(i)) {
441 params->rate_num = snd_interval_value(i);
442 params->rate_den = 1;
443 }
444 }
445
446 if (!params->fifo_size) {
447 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
448 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
449 if (snd_mask_single(m) && snd_interval_single(i)) {
450 err = substream->ops->ioctl(substream,
451 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
452 if (err < 0)
453 return err;
454 }
455 }
456
457 if (!params->info) {
458 params->info = substream->runtime->hw.info;
459 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
460 SNDRV_PCM_INFO_DRAIN_TRIGGER);
461 if (!hw_support_mmap(substream))
462 params->info &= ~(SNDRV_PCM_INFO_MMAP |
463 SNDRV_PCM_INFO_MMAP_VALID);
464 }
465
466 return 0;
467 }
468
469 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
470 struct snd_pcm_hw_params *params)
471 {
472 int err;
473
474 params->info = 0;
475 params->fifo_size = 0;
476 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
477 params->msbits = 0;
478 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
479 params->rate_num = 0;
480 params->rate_den = 0;
481 }
482
483 err = constrain_mask_params(substream, params);
484 if (err < 0)
485 return err;
486
487 err = constrain_interval_params(substream, params);
488 if (err < 0)
489 return err;
490
491 err = constrain_params_by_rules(substream, params);
492 if (err < 0)
493 return err;
494
495 params->rmask = 0;
496
497 return 0;
498 }
499 EXPORT_SYMBOL(snd_pcm_hw_refine);
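/*
 * Caller sketch (illustrative, not part of the original file): rmask selects
 * which parameters the caller wants refined, and cmask reports back which of
 * them were actually narrowed.  snd_pcm_hw_params() below simply requests all
 * of them before calling this function:
 *
 *	params->rmask = ~0U;
 *	err = snd_pcm_hw_refine(substream, params);
 *	if (err < 0)
 *		return err;
 *	changed = params->cmask;	(bits of the parameters that were narrowed)
 */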
500
501 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
502 struct snd_pcm_hw_params __user * _params)
503 {
504 struct snd_pcm_hw_params *params;
505 int err;
506
507 params = memdup_user(_params, sizeof(*params));
508 if (IS_ERR(params))
509 return PTR_ERR(params);
510
511 err = snd_pcm_hw_refine(substream, params);
512 if (err < 0)
513 goto end;
514
515 err = fixup_unreferenced_params(substream, params);
516 if (err < 0)
517 goto end;
518
519 if (copy_to_user(_params, params, sizeof(*params)))
520 err = -EFAULT;
521 end:
522 kfree(params);
523 return err;
524 }
525
526 static int period_to_usecs(struct snd_pcm_runtime *runtime)
527 {
528 int usecs;
529
530 if (! runtime->rate)
531 return -1; /* invalid */
532
533 /* take 75% of period time as the deadline */
534 usecs = (750000 / runtime->rate) * runtime->period_size;
535 usecs += ((750000 % runtime->rate) * runtime->period_size) /
536 runtime->rate;
537
538 return usecs;
539 }
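/*
 * Worked example (illustrative, not part of the original file): at 48000 Hz
 * with a 1024-frame period, the period time is 1024 / 48000 s = ~21333 us and
 * 75% of that is 16000 us, which is exactly what the integer arithmetic above
 * yields: (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 *       = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000.
 */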
540
541 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
542 {
543 snd_pcm_stream_lock_irq(substream);
544 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
545 substream->runtime->status->state = state;
546 snd_pcm_stream_unlock_irq(substream);
547 }
548
549 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
550 int event)
551 {
552 #ifdef CONFIG_SND_PCM_TIMER
553 if (substream->timer)
554 snd_timer_notify(substream->timer, event,
555 &substream->runtime->trigger_tstamp);
556 #endif
557 }
558
559 /**
560 * snd_pcm_hw_params_choose - choose a configuration defined by @params
561 * @pcm: PCM substream instance
562 * @params: the hw_params instance
563 *
564 * Choose one configuration from configuration space defined by @params.
565 * The configuration chosen is the one obtained by fixing, in this order:
566 * first access, first format, first subformat, min channels,
567 * min rate, min period time, max buffer size, min tick time
568 *
569 * Return: Zero if successful, or a negative error code on failure.
570 */
571 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
572 struct snd_pcm_hw_params *params)
573 {
574 static const int vars[] = {
575 SNDRV_PCM_HW_PARAM_ACCESS,
576 SNDRV_PCM_HW_PARAM_FORMAT,
577 SNDRV_PCM_HW_PARAM_SUBFORMAT,
578 SNDRV_PCM_HW_PARAM_CHANNELS,
579 SNDRV_PCM_HW_PARAM_RATE,
580 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
581 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
582 SNDRV_PCM_HW_PARAM_TICK_TIME,
583 -1
584 };
585 const int *v;
586 struct snd_mask old_mask;
587 struct snd_interval old_interval;
588 int changed;
589
590 for (v = vars; *v != -1; v++) {
591 /* Keep old parameter to trace. */
592 if (trace_hw_mask_param_enabled()) {
593 if (hw_is_mask(*v))
594 old_mask = *hw_param_mask(params, *v);
595 }
596 if (trace_hw_interval_param_enabled()) {
597 if (hw_is_interval(*v))
598 old_interval = *hw_param_interval(params, *v);
599 }
600 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
601 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
602 else
603 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
604 if (changed < 0)
605 return changed;
606 if (changed == 0)
607 continue;
608
609 /* Trace the changed parameter. */
610 if (hw_is_mask(*v)) {
611 trace_hw_mask_param(pcm, *v, 0, &old_mask,
612 hw_param_mask(params, *v));
613 }
614 if (hw_is_interval(*v)) {
615 trace_hw_interval_param(pcm, *v, 0, &old_interval,
616 hw_param_interval(params, *v));
617 }
618 }
619
620 return 0;
621 }
622
623 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
624 struct snd_pcm_hw_params *params)
625 {
626 struct snd_pcm_runtime *runtime;
627 int err, usecs;
628 unsigned int bits;
629 snd_pcm_uframes_t frames;
630
631 if (PCM_RUNTIME_CHECK(substream))
632 return -ENXIO;
633 runtime = substream->runtime;
634 snd_pcm_stream_lock_irq(substream);
635 switch (runtime->status->state) {
636 case SNDRV_PCM_STATE_OPEN:
637 case SNDRV_PCM_STATE_SETUP:
638 case SNDRV_PCM_STATE_PREPARED:
639 break;
640 default:
641 snd_pcm_stream_unlock_irq(substream);
642 return -EBADFD;
643 }
644 snd_pcm_stream_unlock_irq(substream);
645 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
646 if (!substream->oss.oss)
647 #endif
648 if (atomic_read(&substream->mmap_count))
649 return -EBADFD;
650
651 params->rmask = ~0U;
652 err = snd_pcm_hw_refine(substream, params);
653 if (err < 0)
654 goto _error;
655
656 err = snd_pcm_hw_params_choose(substream, params);
657 if (err < 0)
658 goto _error;
659
660 err = fixup_unreferenced_params(substream, params);
661 if (err < 0)
662 goto _error;
663
664 if (substream->ops->hw_params != NULL) {
665 err = substream->ops->hw_params(substream, params);
666 if (err < 0)
667 goto _error;
668 }
669
670 runtime->access = params_access(params);
671 runtime->format = params_format(params);
672 runtime->subformat = params_subformat(params);
673 runtime->channels = params_channels(params);
674 runtime->rate = params_rate(params);
675 runtime->period_size = params_period_size(params);
676 runtime->periods = params_periods(params);
677 runtime->buffer_size = params_buffer_size(params);
678 runtime->info = params->info;
679 runtime->rate_num = params->rate_num;
680 runtime->rate_den = params->rate_den;
681 runtime->no_period_wakeup =
682 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
683 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
684
685 bits = snd_pcm_format_physical_width(runtime->format);
686 runtime->sample_bits = bits;
687 bits *= runtime->channels;
688 runtime->frame_bits = bits;
689 frames = 1;
690 while (bits % 8 != 0) {
691 bits *= 2;
692 frames *= 2;
693 }
694 runtime->byte_align = bits / 8;
695 runtime->min_align = frames;
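	/*
	 * Illustration (not part of the original file): for stereo S16_LE,
	 * frame_bits is 32, which is already a byte multiple, so byte_align
	 * is 4 and min_align is 1 frame.  For a mono 4-bit format (assuming
	 * a 4-bit physical width, e.g. IMA ADPCM), frame_bits is 4; the loop
	 * doubles once so that byte_align becomes 1 and min_align becomes 2,
	 * i.e. the smallest byte-aligned unit is two frames sharing one byte.
	 */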
696
697 /* Default sw params */
698 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
699 runtime->period_step = 1;
700 runtime->control->avail_min = runtime->period_size;
701 runtime->start_threshold = 1;
702 runtime->stop_threshold = runtime->buffer_size;
703 runtime->silence_threshold = 0;
704 runtime->silence_size = 0;
705 runtime->boundary = runtime->buffer_size;
706 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
707 runtime->boundary *= 2;
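	/*
	 * Note (illustrative, not part of the original file): the loop above
	 * makes 'boundary' the largest value of buffer_size * 2^n that still
	 * leaves room below LONG_MAX; e.g. with a 1024-frame buffer on a
	 * 64-bit kernel the boundary ends up at 2^62 frames.  The application
	 * and hardware pointers wrap at this boundary rather than at the
	 * buffer size.
	 */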
708
709 snd_pcm_timer_resolution_change(substream);
710 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
711
712 if (pm_qos_request_active(&substream->latency_pm_qos_req))
713 pm_qos_remove_request(&substream->latency_pm_qos_req);
714 if ((usecs = period_to_usecs(runtime)) >= 0)
715 pm_qos_add_request(&substream->latency_pm_qos_req,
716 PM_QOS_CPU_DMA_LATENCY, usecs);
717 return 0;
718 _error:
719 	/* The hardware might be unusable from this point on,
720 	   so force the application to retry with correct
721 	   hardware parameter settings. */
722 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
723 if (substream->ops->hw_free != NULL)
724 substream->ops->hw_free(substream);
725 return err;
726 }
727
728 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
729 struct snd_pcm_hw_params __user * _params)
730 {
731 struct snd_pcm_hw_params *params;
732 int err;
733
734 params = memdup_user(_params, sizeof(*params));
735 if (IS_ERR(params))
736 return PTR_ERR(params);
737
738 err = snd_pcm_hw_params(substream, params);
739 if (err < 0)
740 goto end;
741
742 if (copy_to_user(_params, params, sizeof(*params)))
743 err = -EFAULT;
744 end:
745 kfree(params);
746 return err;
747 }
748
749 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
750 {
751 struct snd_pcm_runtime *runtime;
752 int result = 0;
753
754 if (PCM_RUNTIME_CHECK(substream))
755 return -ENXIO;
756 runtime = substream->runtime;
757 snd_pcm_stream_lock_irq(substream);
758 switch (runtime->status->state) {
759 case SNDRV_PCM_STATE_SETUP:
760 case SNDRV_PCM_STATE_PREPARED:
761 break;
762 default:
763 snd_pcm_stream_unlock_irq(substream);
764 return -EBADFD;
765 }
766 snd_pcm_stream_unlock_irq(substream);
767 if (atomic_read(&substream->mmap_count))
768 return -EBADFD;
769 if (substream->ops->hw_free)
770 result = substream->ops->hw_free(substream);
771 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
772 pm_qos_remove_request(&substream->latency_pm_qos_req);
773 return result;
774 }
775
776 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
777 struct snd_pcm_sw_params *params)
778 {
779 struct snd_pcm_runtime *runtime;
780 int err;
781
782 if (PCM_RUNTIME_CHECK(substream))
783 return -ENXIO;
784 runtime = substream->runtime;
785 snd_pcm_stream_lock_irq(substream);
786 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
787 snd_pcm_stream_unlock_irq(substream);
788 return -EBADFD;
789 }
790 snd_pcm_stream_unlock_irq(substream);
791
792 if (params->tstamp_mode < 0 ||
793 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
794 return -EINVAL;
795 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
796 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
797 return -EINVAL;
798 if (params->avail_min == 0)
799 return -EINVAL;
800 if (params->silence_size >= runtime->boundary) {
801 if (params->silence_threshold != 0)
802 return -EINVAL;
803 } else {
804 if (params->silence_size > params->silence_threshold)
805 return -EINVAL;
806 if (params->silence_threshold > runtime->buffer_size)
807 return -EINVAL;
808 }
809 err = 0;
810 snd_pcm_stream_lock_irq(substream);
811 runtime->tstamp_mode = params->tstamp_mode;
812 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
813 runtime->tstamp_type = params->tstamp_type;
814 runtime->period_step = params->period_step;
815 runtime->control->avail_min = params->avail_min;
816 runtime->start_threshold = params->start_threshold;
817 runtime->stop_threshold = params->stop_threshold;
818 runtime->silence_threshold = params->silence_threshold;
819 runtime->silence_size = params->silence_size;
820 params->boundary = runtime->boundary;
821 if (snd_pcm_running(substream)) {
822 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
823 runtime->silence_size > 0)
824 snd_pcm_playback_silence(substream, ULONG_MAX);
825 err = snd_pcm_update_state(substream, runtime);
826 }
827 snd_pcm_stream_unlock_irq(substream);
828 return err;
829 }
830
831 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
832 struct snd_pcm_sw_params __user * _params)
833 {
834 struct snd_pcm_sw_params params;
835 int err;
836 if (copy_from_user(&params, _params, sizeof(params)))
837 return -EFAULT;
838 err = snd_pcm_sw_params(substream, &params);
839 if (copy_to_user(_params, &params, sizeof(params)))
840 return -EFAULT;
841 return err;
842 }
843
844 static inline snd_pcm_uframes_t
845 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
846 {
847 snd_pcm_uframes_t delay;
848
849 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
850 delay = snd_pcm_playback_hw_avail(substream->runtime);
851 else
852 delay = snd_pcm_capture_avail(substream->runtime);
853 return delay + substream->runtime->delay;
854 }
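/*
 * Note (illustrative, not part of the original file): for playback the delay
 * is the data still queued in the ring buffer (hw_avail), i.e. frames written
 * but not yet played; for capture it is the data already captured but not yet
 * read (avail).  runtime->delay adds any extra latency reported by the driver,
 * e.g. a FIFO inside the codec.
 */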
855
856 int snd_pcm_status(struct snd_pcm_substream *substream,
857 struct snd_pcm_status *status)
858 {
859 struct snd_pcm_runtime *runtime = substream->runtime;
860
861 snd_pcm_stream_lock_irq(substream);
862
863 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
864 &runtime->audio_tstamp_config);
865
866 /* backwards compatible behavior */
867 if (runtime->audio_tstamp_config.type_requested ==
868 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
869 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
870 runtime->audio_tstamp_config.type_requested =
871 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
872 else
873 runtime->audio_tstamp_config.type_requested =
874 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
875 runtime->audio_tstamp_report.valid = 0;
876 } else
877 runtime->audio_tstamp_report.valid = 1;
878
879 status->state = runtime->status->state;
880 status->suspended_state = runtime->status->suspended_state;
881 if (status->state == SNDRV_PCM_STATE_OPEN)
882 goto _end;
883 status->trigger_tstamp = runtime->trigger_tstamp;
884 if (snd_pcm_running(substream)) {
885 snd_pcm_update_hw_ptr(substream);
886 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
887 status->tstamp = runtime->status->tstamp;
888 status->driver_tstamp = runtime->driver_tstamp;
889 status->audio_tstamp =
890 runtime->status->audio_tstamp;
891 if (runtime->audio_tstamp_report.valid == 1)
892 /* backwards compatibility, no report provided in COMPAT mode */
893 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
894 &status->audio_tstamp_accuracy,
895 &runtime->audio_tstamp_report);
896
897 goto _tstamp_end;
898 }
899 } else {
900 /* get tstamp only in fallback mode and only if enabled */
901 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
902 snd_pcm_gettime(runtime, &status->tstamp);
903 }
904 _tstamp_end:
905 status->appl_ptr = runtime->control->appl_ptr;
906 status->hw_ptr = runtime->status->hw_ptr;
907 status->avail = snd_pcm_avail(substream);
908 status->delay = snd_pcm_running(substream) ?
909 snd_pcm_calc_delay(substream) : 0;
910 status->avail_max = runtime->avail_max;
911 status->overrange = runtime->overrange;
912 runtime->avail_max = 0;
913 runtime->overrange = 0;
914 _end:
915 snd_pcm_stream_unlock_irq(substream);
916 return 0;
917 }
918
919 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
920 struct snd_pcm_status __user * _status,
921 bool ext)
922 {
923 struct snd_pcm_status status;
924 int res;
925
926 memset(&status, 0, sizeof(status));
927 /*
928 * with extension, parameters are read/write,
929 * get audio_tstamp_data from user,
930 * ignore rest of status structure
931 */
932 if (ext && get_user(status.audio_tstamp_data,
933 (u32 __user *)(&_status->audio_tstamp_data)))
934 return -EFAULT;
935 res = snd_pcm_status(substream, &status);
936 if (res < 0)
937 return res;
938 if (copy_to_user(_status, &status, sizeof(status)))
939 return -EFAULT;
940 return 0;
941 }
942
943 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
944 struct snd_pcm_channel_info * info)
945 {
946 struct snd_pcm_runtime *runtime;
947 unsigned int channel;
948
949 channel = info->channel;
950 runtime = substream->runtime;
951 snd_pcm_stream_lock_irq(substream);
952 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
953 snd_pcm_stream_unlock_irq(substream);
954 return -EBADFD;
955 }
956 snd_pcm_stream_unlock_irq(substream);
957 if (channel >= runtime->channels)
958 return -EINVAL;
959 memset(info, 0, sizeof(*info));
960 info->channel = channel;
961 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
962 }
963
964 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
965 struct snd_pcm_channel_info __user * _info)
966 {
967 struct snd_pcm_channel_info info;
968 int res;
969
970 if (copy_from_user(&info, _info, sizeof(info)))
971 return -EFAULT;
972 res = snd_pcm_channel_info(substream, &info);
973 if (res < 0)
974 return res;
975 if (copy_to_user(_info, &info, sizeof(info)))
976 return -EFAULT;
977 return 0;
978 }
979
980 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
981 {
982 struct snd_pcm_runtime *runtime = substream->runtime;
983 if (runtime->trigger_master == NULL)
984 return;
985 if (runtime->trigger_master == substream) {
986 if (!runtime->trigger_tstamp_latched)
987 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
988 } else {
989 snd_pcm_trigger_tstamp(runtime->trigger_master);
990 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
991 }
992 runtime->trigger_master = NULL;
993 }
994
995 struct action_ops {
996 int (*pre_action)(struct snd_pcm_substream *substream, int state);
997 int (*do_action)(struct snd_pcm_substream *substream, int state);
998 void (*undo_action)(struct snd_pcm_substream *substream, int state);
999 void (*post_action)(struct snd_pcm_substream *substream, int state);
1000 };
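/*
 * Note (illustrative, not part of the original file): snd_pcm_action_group()
 * and snd_pcm_action_single() below drive these callbacks in a fixed order:
 * pre_action on every substream first (any failure aborts the whole action),
 * then do_action on every substream (a failure triggers undo_action on the
 * substreams already done), and finally post_action on every substream once
 * everything has succeeded.
 */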
1001
1002 /*
1003 * This function is the core handler for linked streams.
1004 * Note: the stream state might be changed even on failure.
1005 * Note2: call with the calling stream's lock and the link lock held.
1006 */
1007 static int snd_pcm_action_group(const struct action_ops *ops,
1008 struct snd_pcm_substream *substream,
1009 int state, int do_lock)
1010 {
1011 struct snd_pcm_substream *s = NULL;
1012 struct snd_pcm_substream *s1;
1013 int res = 0, depth = 1;
1014
1015 snd_pcm_group_for_each_entry(s, substream) {
1016 if (do_lock && s != substream) {
1017 if (s->pcm->nonatomic)
1018 mutex_lock_nested(&s->self_group.mutex, depth);
1019 else
1020 spin_lock_nested(&s->self_group.lock, depth);
1021 depth++;
1022 }
1023 res = ops->pre_action(s, state);
1024 if (res < 0)
1025 goto _unlock;
1026 }
1027 snd_pcm_group_for_each_entry(s, substream) {
1028 res = ops->do_action(s, state);
1029 if (res < 0) {
1030 if (ops->undo_action) {
1031 snd_pcm_group_for_each_entry(s1, substream) {
1032 if (s1 == s) /* failed stream */
1033 break;
1034 ops->undo_action(s1, state);
1035 }
1036 }
1037 s = NULL; /* unlock all */
1038 goto _unlock;
1039 }
1040 }
1041 snd_pcm_group_for_each_entry(s, substream) {
1042 ops->post_action(s, state);
1043 }
1044 _unlock:
1045 if (do_lock) {
1046 /* unlock streams */
1047 snd_pcm_group_for_each_entry(s1, substream) {
1048 if (s1 != substream) {
1049 if (s1->pcm->nonatomic)
1050 mutex_unlock(&s1->self_group.mutex);
1051 else
1052 spin_unlock(&s1->self_group.lock);
1053 }
1054 if (s1 == s) /* end */
1055 break;
1056 }
1057 }
1058 return res;
1059 }
1060
1061 /*
1062 * Note: call with stream lock
1063 */
1064 static int snd_pcm_action_single(const struct action_ops *ops,
1065 struct snd_pcm_substream *substream,
1066 int state)
1067 {
1068 int res;
1069
1070 res = ops->pre_action(substream, state);
1071 if (res < 0)
1072 return res;
1073 res = ops->do_action(substream, state);
1074 if (res == 0)
1075 ops->post_action(substream, state);
1076 else if (ops->undo_action)
1077 ops->undo_action(substream, state);
1078 return res;
1079 }
1080
1081 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1082 struct snd_pcm_group *new_group)
1083 {
1084 substream->group = new_group;
1085 list_move(&substream->link_list, &new_group->substreams);
1086 }
1087
1088 /*
1089 * Unref and unlock the group, but keep the stream lock;
1090 * when the group becomes empty and is no longer referenced, it is freed
1091 */
1092 static void snd_pcm_group_unref(struct snd_pcm_group *group,
1093 struct snd_pcm_substream *substream)
1094 {
1095 bool do_free;
1096
1097 if (!group)
1098 return;
1099 do_free = refcount_dec_and_test(&group->refs) &&
1100 list_empty(&group->substreams);
1101 snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1102 if (do_free)
1103 kfree(group);
1104 }
1105
1106 /*
1107 * Lock the group inside a stream lock and reference it;
1108 * return the locked group object, or NULL if not linked
1109 */
1110 static struct snd_pcm_group *
1111 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1112 {
1113 bool nonatomic = substream->pcm->nonatomic;
1114 struct snd_pcm_group *group;
1115 bool trylock;
1116
1117 for (;;) {
1118 if (!snd_pcm_stream_linked(substream))
1119 return NULL;
1120 group = substream->group;
1121 /* block freeing the group object */
1122 refcount_inc(&group->refs);
1123
1124 trylock = nonatomic ? mutex_trylock(&group->mutex) :
1125 spin_trylock(&group->lock);
1126 if (trylock)
1127 break; /* OK */
1128
1129 		/* re-lock to avoid an ABBA deadlock */
1130 snd_pcm_stream_unlock(substream);
1131 snd_pcm_group_lock(group, nonatomic);
1132 snd_pcm_stream_lock(substream);
1133
1134 /* check the group again; the above opens a small race window */
1135 if (substream->group == group)
1136 break; /* OK */
1137 /* group changed, try again */
1138 snd_pcm_group_unref(group, substream);
1139 }
1140 return group;
1141 }
1142
1143 /*
1144 * Note: call with stream lock
1145 */
1146 static int snd_pcm_action(const struct action_ops *ops,
1147 struct snd_pcm_substream *substream,
1148 int state)
1149 {
1150 struct snd_pcm_group *group;
1151 int res;
1152
1153 group = snd_pcm_stream_group_ref(substream);
1154 if (group)
1155 res = snd_pcm_action_group(ops, substream, state, 1);
1156 else
1157 res = snd_pcm_action_single(ops, substream, state);
1158 snd_pcm_group_unref(group, substream);
1159 return res;
1160 }
1161
1162 /*
1163 * Note: call without any locks held
1164 */
1165 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1166 struct snd_pcm_substream *substream,
1167 int state)
1168 {
1169 int res;
1170
1171 snd_pcm_stream_lock_irq(substream);
1172 res = snd_pcm_action(ops, substream, state);
1173 snd_pcm_stream_unlock_irq(substream);
1174 return res;
1175 }
1176
1177 /*
1178 */
1179 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1180 struct snd_pcm_substream *substream,
1181 int state)
1182 {
1183 int res;
1184
1185 /* Guarantee the group members won't change during non-atomic action */
1186 down_read(&snd_pcm_link_rwsem);
1187 if (snd_pcm_stream_linked(substream))
1188 res = snd_pcm_action_group(ops, substream, state, 0);
1189 else
1190 res = snd_pcm_action_single(ops, substream, state);
1191 up_read(&snd_pcm_link_rwsem);
1192 return res;
1193 }
1194
1195 /*
1196 * start callbacks
1197 */
1198 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1199 {
1200 struct snd_pcm_runtime *runtime = substream->runtime;
1201 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1202 return -EBADFD;
1203 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1204 !snd_pcm_playback_data(substream))
1205 return -EPIPE;
1206 runtime->trigger_tstamp_latched = false;
1207 runtime->trigger_master = substream;
1208 return 0;
1209 }
1210
1211 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1212 {
1213 if (substream->runtime->trigger_master != substream)
1214 return 0;
1215 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1216 }
1217
1218 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1219 {
1220 if (substream->runtime->trigger_master == substream)
1221 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1222 }
1223
1224 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1225 {
1226 struct snd_pcm_runtime *runtime = substream->runtime;
1227 snd_pcm_trigger_tstamp(substream);
1228 runtime->hw_ptr_jiffies = jiffies;
1229 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1230 runtime->rate;
1231 runtime->status->state = state;
1232 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1233 runtime->silence_size > 0)
1234 snd_pcm_playback_silence(substream, ULONG_MAX);
1235 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1236 }
1237
1238 static const struct action_ops snd_pcm_action_start = {
1239 .pre_action = snd_pcm_pre_start,
1240 .do_action = snd_pcm_do_start,
1241 .undo_action = snd_pcm_undo_start,
1242 .post_action = snd_pcm_post_start
1243 };
1244
1245 /**
1246 * snd_pcm_start - start all linked streams
1247 * @substream: the PCM substream instance
1248 *
1249 * Return: Zero if successful, or a negative error code.
1250 * The stream lock must be acquired before calling this function.
1251 */
1252 int snd_pcm_start(struct snd_pcm_substream *substream)
1253 {
1254 return snd_pcm_action(&snd_pcm_action_start, substream,
1255 SNDRV_PCM_STATE_RUNNING);
1256 }
1257
1258 /* take the stream lock and start the streams */
1259 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1260 {
1261 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1262 SNDRV_PCM_STATE_RUNNING);
1263 }
1264
1265 /*
1266 * stop callbacks
1267 */
1268 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1269 {
1270 struct snd_pcm_runtime *runtime = substream->runtime;
1271 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1272 return -EBADFD;
1273 runtime->trigger_master = substream;
1274 return 0;
1275 }
1276
1277 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1278 {
1279 if (substream->runtime->trigger_master == substream &&
1280 snd_pcm_running(substream))
1281 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1282 	return 0; /* unconditionally stop all substreams */
1283 }
1284
1285 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1286 {
1287 struct snd_pcm_runtime *runtime = substream->runtime;
1288 if (runtime->status->state != state) {
1289 snd_pcm_trigger_tstamp(substream);
1290 runtime->status->state = state;
1291 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1292 }
1293 wake_up(&runtime->sleep);
1294 wake_up(&runtime->tsleep);
1295 }
1296
1297 static const struct action_ops snd_pcm_action_stop = {
1298 .pre_action = snd_pcm_pre_stop,
1299 .do_action = snd_pcm_do_stop,
1300 .post_action = snd_pcm_post_stop
1301 };
1302
1303 /**
1304 * snd_pcm_stop - try to stop all running streams in the substream group
1305 * @substream: the PCM substream instance
1306 * @state: PCM state after stopping the stream
1307 *
1308 * The state of each stream is then changed to the given state unconditionally.
1309 *
1310 * Return: Zero if successful, or a negative error code.
1311 */
1312 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1313 {
1314 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1315 }
1316 EXPORT_SYMBOL(snd_pcm_stop);
1317
1318 /**
1319 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1320 * @substream: the PCM substream
1321 *
1322 * After stopping, the state is changed to SETUP.
1323 * Unlike snd_pcm_stop(), this affects only the given stream.
1324 *
1325 * Return: Zero if successful, or a negative error code.
1326 */
1327 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1328 {
1329 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1330 SNDRV_PCM_STATE_SETUP);
1331 }
1332
1333 /**
1334 * snd_pcm_stop_xrun - stop the running streams as XRUN
1335 * @substream: the PCM substream instance
1336 *
1337 * This stops the given running substream (and all linked substreams) as XRUN.
1338 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1339 *
1340 * Return: Zero if successful, or a negative error code.
1341 */
1342 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1343 {
1344 unsigned long flags;
1345
1346 snd_pcm_stream_lock_irqsave(substream, flags);
1347 if (substream->runtime && snd_pcm_running(substream))
1348 __snd_pcm_xrun(substream);
1349 snd_pcm_stream_unlock_irqrestore(substream, flags);
1350 return 0;
1351 }
1352 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1353
1354 /*
1355 * pause callbacks
1356 */
1357 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1358 {
1359 struct snd_pcm_runtime *runtime = substream->runtime;
1360 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1361 return -ENOSYS;
1362 if (push) {
1363 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1364 return -EBADFD;
1365 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1366 return -EBADFD;
1367 runtime->trigger_master = substream;
1368 return 0;
1369 }
1370
1371 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1372 {
1373 if (substream->runtime->trigger_master != substream)
1374 return 0;
1375 /* some drivers might use hw_ptr to recover from the pause -
1376 update the hw_ptr now */
1377 if (push)
1378 snd_pcm_update_hw_ptr(substream);
1379 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done against a
1380 	 * delta from the current jiffies; moving hw_ptr_jiffies far into the
1381 	 * past yields a delta large enough to effectively skip the check once.
1382 */
1383 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1384 return substream->ops->trigger(substream,
1385 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1386 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1387 }
1388
1389 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1390 {
1391 if (substream->runtime->trigger_master == substream)
1392 substream->ops->trigger(substream,
1393 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1394 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1395 }
1396
1397 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1398 {
1399 struct snd_pcm_runtime *runtime = substream->runtime;
1400 snd_pcm_trigger_tstamp(substream);
1401 if (push) {
1402 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1403 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1404 wake_up(&runtime->sleep);
1405 wake_up(&runtime->tsleep);
1406 } else {
1407 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1408 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1409 }
1410 }
1411
1412 static const struct action_ops snd_pcm_action_pause = {
1413 .pre_action = snd_pcm_pre_pause,
1414 .do_action = snd_pcm_do_pause,
1415 .undo_action = snd_pcm_undo_pause,
1416 .post_action = snd_pcm_post_pause
1417 };
1418
1419 /*
1420 * Push/release the pause for all linked streams.
1421 */
1422 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1423 {
1424 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1425 }
1426
1427 #ifdef CONFIG_PM
1428 /* suspend */
1429
1430 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1431 {
1432 struct snd_pcm_runtime *runtime = substream->runtime;
1433 switch (runtime->status->state) {
1434 case SNDRV_PCM_STATE_SUSPENDED:
1435 return -EBUSY;
1436 	/* unresumable PCM states; return -EBUSY to skip the suspend */
1437 case SNDRV_PCM_STATE_OPEN:
1438 case SNDRV_PCM_STATE_SETUP:
1439 case SNDRV_PCM_STATE_DISCONNECTED:
1440 return -EBUSY;
1441 }
1442 runtime->trigger_master = substream;
1443 return 0;
1444 }
1445
1446 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1447 {
1448 struct snd_pcm_runtime *runtime = substream->runtime;
1449 if (runtime->trigger_master != substream)
1450 return 0;
1451 if (! snd_pcm_running(substream))
1452 return 0;
1453 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1454 return 0; /* suspend unconditionally */
1455 }
1456
1457 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1458 {
1459 struct snd_pcm_runtime *runtime = substream->runtime;
1460 snd_pcm_trigger_tstamp(substream);
1461 runtime->status->suspended_state = runtime->status->state;
1462 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1463 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1464 wake_up(&runtime->sleep);
1465 wake_up(&runtime->tsleep);
1466 }
1467
1468 static const struct action_ops snd_pcm_action_suspend = {
1469 .pre_action = snd_pcm_pre_suspend,
1470 .do_action = snd_pcm_do_suspend,
1471 .post_action = snd_pcm_post_suspend
1472 };
1473
1474 /*
1475 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1476 * @substream: the PCM substream
1477 *
1478 * After this call, all streams are changed to SUSPENDED state.
1479 *
1480 * Return: Zero if successful, or a negative error code.
1481 */
1482 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1483 {
1484 int err;
1485 unsigned long flags;
1486
1487 snd_pcm_stream_lock_irqsave(substream, flags);
1488 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1489 snd_pcm_stream_unlock_irqrestore(substream, flags);
1490 return err;
1491 }
1492
1493 /**
1494 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1495 * @pcm: the PCM instance
1496 *
1497 * After this call, all streams are changed to SUSPENDED state.
1498 *
1499 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1500 */
1501 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1502 {
1503 struct snd_pcm_substream *substream;
1504 int stream, err = 0;
1505
1506 if (! pcm)
1507 return 0;
1508
1509 for (stream = 0; stream < 2; stream++) {
1510 for (substream = pcm->streams[stream].substream;
1511 substream; substream = substream->next) {
1512 /* FIXME: the open/close code should lock this as well */
1513 if (substream->runtime == NULL)
1514 continue;
1515
1516 /*
1517 			 * Skip BE dai link PCMs that are internal and may
1518 * not have their substream ops set.
1519 */
1520 if (!substream->ops)
1521 continue;
1522
1523 err = snd_pcm_suspend(substream);
1524 if (err < 0 && err != -EBUSY)
1525 return err;
1526 }
1527 }
1528 return 0;
1529 }
1530 EXPORT_SYMBOL(snd_pcm_suspend_all);
1531
1532 /* resume */
1533
1534 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1535 {
1536 struct snd_pcm_runtime *runtime = substream->runtime;
1537 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1538 return -ENOSYS;
1539 runtime->trigger_master = substream;
1540 return 0;
1541 }
1542
1543 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1544 {
1545 struct snd_pcm_runtime *runtime = substream->runtime;
1546 if (runtime->trigger_master != substream)
1547 return 0;
1548 /* DMA not running previously? */
1549 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1550 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1551 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1552 return 0;
1553 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1554 }
1555
1556 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1557 {
1558 if (substream->runtime->trigger_master == substream &&
1559 snd_pcm_running(substream))
1560 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1561 }
1562
1563 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1564 {
1565 struct snd_pcm_runtime *runtime = substream->runtime;
1566 snd_pcm_trigger_tstamp(substream);
1567 runtime->status->state = runtime->status->suspended_state;
1568 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1569 }
1570
1571 static const struct action_ops snd_pcm_action_resume = {
1572 .pre_action = snd_pcm_pre_resume,
1573 .do_action = snd_pcm_do_resume,
1574 .undo_action = snd_pcm_undo_resume,
1575 .post_action = snd_pcm_post_resume
1576 };
1577
1578 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1579 {
1580 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1581 }
1582
1583 #else
1584
1585 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1586 {
1587 return -ENOSYS;
1588 }
1589
1590 #endif /* CONFIG_PM */
1591
1592 /*
1593 * xrun ioctl
1594 *
1595 * Change the RUNNING stream(s) to XRUN state.
1596 */
1597 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1598 {
1599 struct snd_pcm_runtime *runtime = substream->runtime;
1600 int result;
1601
1602 snd_pcm_stream_lock_irq(substream);
1603 switch (runtime->status->state) {
1604 case SNDRV_PCM_STATE_XRUN:
1605 result = 0; /* already there */
1606 break;
1607 case SNDRV_PCM_STATE_RUNNING:
1608 __snd_pcm_xrun(substream);
1609 result = 0;
1610 break;
1611 default:
1612 result = -EBADFD;
1613 }
1614 snd_pcm_stream_unlock_irq(substream);
1615 return result;
1616 }
1617
1618 /*
1619 * reset ioctl
1620 */
1621 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1622 {
1623 struct snd_pcm_runtime *runtime = substream->runtime;
1624 switch (runtime->status->state) {
1625 case SNDRV_PCM_STATE_RUNNING:
1626 case SNDRV_PCM_STATE_PREPARED:
1627 case SNDRV_PCM_STATE_PAUSED:
1628 case SNDRV_PCM_STATE_SUSPENDED:
1629 return 0;
1630 default:
1631 return -EBADFD;
1632 }
1633 }
1634
1635 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1636 {
1637 struct snd_pcm_runtime *runtime = substream->runtime;
1638 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1639 if (err < 0)
1640 return err;
1641 runtime->hw_ptr_base = 0;
1642 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1643 runtime->status->hw_ptr % runtime->period_size;
1644 runtime->silence_start = runtime->status->hw_ptr;
1645 runtime->silence_filled = 0;
1646 return 0;
1647 }
1648
1649 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1650 {
1651 struct snd_pcm_runtime *runtime = substream->runtime;
1652 runtime->control->appl_ptr = runtime->status->hw_ptr;
1653 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1654 runtime->silence_size > 0)
1655 snd_pcm_playback_silence(substream, ULONG_MAX);
1656 }
1657
1658 static const struct action_ops snd_pcm_action_reset = {
1659 .pre_action = snd_pcm_pre_reset,
1660 .do_action = snd_pcm_do_reset,
1661 .post_action = snd_pcm_post_reset
1662 };
1663
1664 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1665 {
1666 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1667 }
1668
1669 /*
1670 * prepare ioctl
1671 */
1672 /* we use the second argument for updating f_flags */
1673 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1674 int f_flags)
1675 {
1676 struct snd_pcm_runtime *runtime = substream->runtime;
1677 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1678 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1679 return -EBADFD;
1680 if (snd_pcm_running(substream))
1681 return -EBUSY;
1682 substream->f_flags = f_flags;
1683 return 0;
1684 }
1685
1686 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1687 {
1688 int err;
1689 err = substream->ops->prepare(substream);
1690 if (err < 0)
1691 return err;
1692 return snd_pcm_do_reset(substream, 0);
1693 }
1694
1695 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1696 {
1697 struct snd_pcm_runtime *runtime = substream->runtime;
1698 runtime->control->appl_ptr = runtime->status->hw_ptr;
1699 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1700 }
1701
1702 static const struct action_ops snd_pcm_action_prepare = {
1703 .pre_action = snd_pcm_pre_prepare,
1704 .do_action = snd_pcm_do_prepare,
1705 .post_action = snd_pcm_post_prepare
1706 };
1707
1708 /**
1709 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1710 * @substream: the PCM substream instance
1711 * @file: file to take f_flags from
1712 *
1713 * Return: Zero if successful, or a negative error code.
1714 */
1715 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1716 struct file *file)
1717 {
1718 int f_flags;
1719
1720 if (file)
1721 f_flags = file->f_flags;
1722 else
1723 f_flags = substream->f_flags;
1724
1725 snd_pcm_stream_lock_irq(substream);
1726 switch (substream->runtime->status->state) {
1727 case SNDRV_PCM_STATE_PAUSED:
1728 snd_pcm_pause(substream, 0);
1729 /* fallthru */
1730 case SNDRV_PCM_STATE_SUSPENDED:
1731 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1732 break;
1733 }
1734 snd_pcm_stream_unlock_irq(substream);
1735
1736 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1737 substream, f_flags);
1738 }
1739
1740 /*
1741 * drain ioctl
1742 */
1743
1744 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1745 {
1746 struct snd_pcm_runtime *runtime = substream->runtime;
1747 switch (runtime->status->state) {
1748 case SNDRV_PCM_STATE_OPEN:
1749 case SNDRV_PCM_STATE_DISCONNECTED:
1750 case SNDRV_PCM_STATE_SUSPENDED:
1751 return -EBADFD;
1752 }
1753 runtime->trigger_master = substream;
1754 return 0;
1755 }
1756
1757 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1758 {
1759 struct snd_pcm_runtime *runtime = substream->runtime;
1760 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1761 switch (runtime->status->state) {
1762 case SNDRV_PCM_STATE_PREPARED:
1763 /* start playback stream if possible */
1764 if (! snd_pcm_playback_empty(substream)) {
1765 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1766 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1767 } else {
1768 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1769 }
1770 break;
1771 case SNDRV_PCM_STATE_RUNNING:
1772 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1773 break;
1774 case SNDRV_PCM_STATE_XRUN:
1775 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1776 break;
1777 default:
1778 break;
1779 }
1780 } else {
1781 /* stop running stream */
1782 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1783 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1784 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1785 snd_pcm_do_stop(substream, new_state);
1786 snd_pcm_post_stop(substream, new_state);
1787 }
1788 }
1789
1790 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1791 runtime->trigger_master == substream &&
1792 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1793 return substream->ops->trigger(substream,
1794 SNDRV_PCM_TRIGGER_DRAIN);
1795
1796 return 0;
1797 }
1798
1799 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1800 {
1801 }
1802
1803 static const struct action_ops snd_pcm_action_drain_init = {
1804 .pre_action = snd_pcm_pre_drain_init,
1805 .do_action = snd_pcm_do_drain_init,
1806 .post_action = snd_pcm_post_drain_init
1807 };
1808
1809 /*
1810 * Drain the stream(s).
1811 * When the substream is linked, sync until the draining of all playback streams
1812 * is finished.
1813 * After this call, all streams are supposed to be in either SETUP or
1814 * DRAINING (capture only) state.
1815 */
1816 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1817 struct file *file)
1818 {
1819 struct snd_card *card;
1820 struct snd_pcm_runtime *runtime;
1821 struct snd_pcm_substream *s;
1822 struct snd_pcm_group *group;
1823 wait_queue_entry_t wait;
1824 int result = 0;
1825 int nonblock = 0;
1826
1827 card = substream->pcm->card;
1828 runtime = substream->runtime;
1829
1830 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1831 return -EBADFD;
1832
1833 if (file) {
1834 if (file->f_flags & O_NONBLOCK)
1835 nonblock = 1;
1836 } else if (substream->f_flags & O_NONBLOCK)
1837 nonblock = 1;
1838
1839 snd_pcm_stream_lock_irq(substream);
1840 /* resume pause */
1841 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1842 snd_pcm_pause(substream, 0);
1843
1844 /* pre-start/stop - all running streams are changed to DRAINING state */
1845 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1846 if (result < 0)
1847 goto unlock;
1848 /* in non-blocking, we don't wait in ioctl but let caller poll */
1849 if (nonblock) {
1850 result = -EAGAIN;
1851 goto unlock;
1852 }
1853
1854 for (;;) {
1855 long tout;
1856 struct snd_pcm_runtime *to_check;
1857 if (signal_pending(current)) {
1858 result = -ERESTARTSYS;
1859 break;
1860 }
1861 /* find a substream to drain */
1862 to_check = NULL;
1863 group = snd_pcm_stream_group_ref(substream);
1864 snd_pcm_group_for_each_entry(s, substream) {
1865 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1866 continue;
1867 runtime = s->runtime;
1868 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1869 to_check = runtime;
1870 break;
1871 }
1872 }
1873 snd_pcm_group_unref(group, substream);
1874 if (!to_check)
1875 break; /* all drained */
1876 init_waitqueue_entry(&wait, current);
1877 add_wait_queue(&to_check->sleep, &wait);
1878 snd_pcm_stream_unlock_irq(substream);
1879 if (runtime->no_period_wakeup)
1880 tout = MAX_SCHEDULE_TIMEOUT;
1881 else {
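 	/* wait at least 10 seconds, or twice the period time if that is longer */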
1882 tout = 10;
1883 if (runtime->rate) {
1884 long t = runtime->period_size * 2 / runtime->rate;
1885 tout = max(t, tout);
1886 }
1887 tout = msecs_to_jiffies(tout * 1000);
1888 }
1889 tout = schedule_timeout_interruptible(tout);
1890
1891 snd_pcm_stream_lock_irq(substream);
1892 group = snd_pcm_stream_group_ref(substream);
1893 snd_pcm_group_for_each_entry(s, substream) {
1894 if (s->runtime == to_check) {
1895 remove_wait_queue(&to_check->sleep, &wait);
1896 break;
1897 }
1898 }
1899 snd_pcm_group_unref(group, substream);
1900
1901 if (card->shutdown) {
1902 result = -ENODEV;
1903 break;
1904 }
1905 if (tout == 0) {
1906 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
1907 result = -ESTRPIPE;
1908 else {
1909 dev_dbg(substream->pcm->card->dev,
1910 "playback drain error (DMA or IRQ trouble?)\n");
1911 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1912 result = -EIO;
1913 }
1914 break;
1915 }
1916 }
1917
1918 unlock:
1919 snd_pcm_stream_unlock_irq(substream);
1920
1921 return result;
1922 }
1923
1924 /*
1925 * drop ioctl
1926 *
1927 * Immediately put all linked substreams into SETUP state.
1928 */
1929 static int snd_pcm_drop(struct snd_pcm_substream *substream)
1930 {
1931 struct snd_pcm_runtime *runtime;
1932 int result = 0;
1933
1934 if (PCM_RUNTIME_CHECK(substream))
1935 return -ENXIO;
1936 runtime = substream->runtime;
1937
1938 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1939 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1940 return -EBADFD;
1941
1942 snd_pcm_stream_lock_irq(substream);
1943 /* resume pause */
1944 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1945 snd_pcm_pause(substream, 0);
1946
1947 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1948 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
1949 snd_pcm_stream_unlock_irq(substream);
1950
1951 return result;
1952 }
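
/*
 * From user space, drain and drop map to plain ioctls on the PCM device node.
 * A rough sketch (error handling omitted; fd is assumed to be an already
 * opened /dev/snd/pcmC*D*p descriptor, not something defined in this file):
 *
 *	ioctl(fd, SNDRV_PCM_IOCTL_DRAIN);	// block until queued playback data is consumed
 *	ioctl(fd, SNDRV_PCM_IOCTL_DROP);	// or: stop immediately, discarding queued data
 */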
1953
1954
1955 static bool is_pcm_file(struct file *file)
1956 {
1957 struct inode *inode = file_inode(file);
1958 struct snd_pcm *pcm;
1959 unsigned int minor;
1960
1961 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
1962 return false;
1963 minor = iminor(inode);
1964 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
1965 if (!pcm)
1966 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
1967 if (!pcm)
1968 return false;
1969 snd_card_unref(pcm->card);
1970 return true;
1971 }
1972
1973 /*
1974 * PCM link handling
1975 */
1976 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1977 {
1978 int res = 0;
1979 struct snd_pcm_file *pcm_file;
1980 struct snd_pcm_substream *substream1;
1981 struct snd_pcm_group *group, *target_group;
1982 bool nonatomic = substream->pcm->nonatomic;
1983 struct fd f = fdget(fd);
1984
1985 if (!f.file)
1986 return -EBADFD;
1987 if (!is_pcm_file(f.file)) {
1988 res = -EBADFD;
1989 goto _badf;
1990 }
1991 pcm_file = f.file->private_data;
1992 substream1 = pcm_file->substream;
1993 group = kzalloc(sizeof(*group), GFP_KERNEL);
1994 if (!group) {
1995 res = -ENOMEM;
1996 goto _nolock;
1997 }
1998 snd_pcm_group_init(group);
1999
2000 down_write(&snd_pcm_link_rwsem);
2001 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2002 substream->runtime->status->state != substream1->runtime->status->state ||
2003 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
2004 res = -EBADFD;
2005 goto _end;
2006 }
2007 if (snd_pcm_stream_linked(substream1)) {
2008 res = -EALREADY;
2009 goto _end;
2010 }
2011
2012 snd_pcm_stream_lock_irq(substream);
2013 if (!snd_pcm_stream_linked(substream)) {
2014 snd_pcm_group_assign(substream, group);
2015 group = NULL; /* assigned, don't free this one below */
2016 }
2017 target_group = substream->group;
2018 snd_pcm_stream_unlock_irq(substream);
2019
2020 snd_pcm_group_lock_irq(target_group, nonatomic);
2021 snd_pcm_stream_lock(substream1);
2022 snd_pcm_group_assign(substream1, target_group);
2023 snd_pcm_stream_unlock(substream1);
2024 snd_pcm_group_unlock_irq(target_group, nonatomic);
2025 _end:
2026 up_write(&snd_pcm_link_rwsem);
2027 _nolock:
2028 kfree(group);
2029 _badf:
2030 fdput(f);
2031 return res;
2032 }
2033
2034 static void relink_to_local(struct snd_pcm_substream *substream)
2035 {
2036 snd_pcm_stream_lock(substream);
2037 snd_pcm_group_assign(substream, &substream->self_group);
2038 snd_pcm_stream_unlock(substream);
2039 }
2040
2041 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2042 {
2043 struct snd_pcm_group *group;
2044 bool nonatomic = substream->pcm->nonatomic;
2045 bool do_free = false;
2046 int res = 0;
2047
2048 down_write(&snd_pcm_link_rwsem);
2049
2050 if (!snd_pcm_stream_linked(substream)) {
2051 res = -EALREADY;
2052 goto _end;
2053 }
2054
2055 group = substream->group;
2056 snd_pcm_group_lock_irq(group, nonatomic);
2057
2058 relink_to_local(substream);
2059
2060 /* detach the last stream, too */
2061 if (list_is_singular(&group->substreams)) {
2062 relink_to_local(list_first_entry(&group->substreams,
2063 struct snd_pcm_substream,
2064 link_list));
2065 do_free = !refcount_read(&group->refs);
2066 }
2067
2068 snd_pcm_group_unlock_irq(group, nonatomic);
2069 if (do_free)
2070 kfree(group);
2071
2072 _end:
2073 up_write(&snd_pcm_link_rwsem);
2074 return res;
2075 }
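
/*
 * Linking is likewise driven from user space: the caller passes the file
 * descriptor of another open PCM substream.  A sketch only, assuming fd1 and
 * fd2 are two already-opened PCM descriptors:
 *
 *	ioctl(fd1, SNDRV_PCM_IOCTL_LINK, fd2);	// fd1 and fd2 now start/stop together
 *	...
 *	ioctl(fd1, SNDRV_PCM_IOCTL_UNLINK);	// detach fd1 from the group again
 */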
2076
2077 /*
2078 * hw configurator
2079 */
2080 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2081 struct snd_pcm_hw_rule *rule)
2082 {
2083 struct snd_interval t;
2084 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2085 hw_param_interval_c(params, rule->deps[1]), &t);
2086 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2087 }
2088
2089 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2090 struct snd_pcm_hw_rule *rule)
2091 {
2092 struct snd_interval t;
2093 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2094 hw_param_interval_c(params, rule->deps[1]), &t);
2095 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2096 }
2097
2098 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2099 struct snd_pcm_hw_rule *rule)
2100 {
2101 struct snd_interval t;
2102 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2103 hw_param_interval_c(params, rule->deps[1]),
2104 (unsigned long) rule->private, &t);
2105 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2106 }
2107
2108 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2109 struct snd_pcm_hw_rule *rule)
2110 {
2111 struct snd_interval t;
2112 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2113 (unsigned long) rule->private,
2114 hw_param_interval_c(params, rule->deps[1]), &t);
2115 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2116 }
2117
2118 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2119 struct snd_pcm_hw_rule *rule)
2120 {
2121 unsigned int k;
2122 const struct snd_interval *i =
2123 hw_param_interval_c(params, rule->deps[0]);
2124 struct snd_mask m;
2125 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2126 snd_mask_any(&m);
2127 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2128 int bits;
2129 if (!snd_mask_test(mask, k))
2130 continue;
2131 bits = snd_pcm_format_physical_width(k);
2132 if (bits <= 0)
2133 continue; /* ignore invalid formats */
2134 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2135 snd_mask_reset(&m, k);
2136 }
2137 return snd_mask_refine(mask, &m);
2138 }
2139
2140 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2141 struct snd_pcm_hw_rule *rule)
2142 {
2143 struct snd_interval t;
2144 unsigned int k;
2145 t.min = UINT_MAX;
2146 t.max = 0;
2147 t.openmin = 0;
2148 t.openmax = 0;
2149 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2150 int bits;
2151 if (!snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2152 continue;
2153 bits = snd_pcm_format_physical_width(k);
2154 if (bits <= 0)
2155 continue; /* ignore invalid formats */
2156 if (t.min > (unsigned)bits)
2157 t.min = bits;
2158 if (t.max < (unsigned)bits)
2159 t.max = bits;
2160 }
2161 t.integer = 1;
2162 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2163 }
2164
2165 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2166 #error "Change this table"
2167 #endif
2168
2169 static const unsigned int rates[] = {
2170 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2171 48000, 64000, 88200, 96000, 176400, 192000
2172 };
2173
2174 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2175 .count = ARRAY_SIZE(rates),
2176 .list = rates,
2177 };
2178
2179 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2180 struct snd_pcm_hw_rule *rule)
2181 {
2182 struct snd_pcm_hardware *hw = rule->private;
2183 return snd_interval_list(hw_param_interval(params, rule->var),
2184 snd_pcm_known_rates.count,
2185 snd_pcm_known_rates.list, hw->rates);
2186 }
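
/*
 * A driver that supports only a few discrete rates typically installs a
 * similar list constraint from its open callback.  A sketch with made-up
 * names (my_rates and my_rate_list are not part of this file; substream is
 * the driver's open-callback argument):
 *
 *	static const unsigned int my_rates[] = { 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_list = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE, &my_rate_list);
 */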
2187
2188 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2189 struct snd_pcm_hw_rule *rule)
2190 {
2191 struct snd_interval t;
2192 struct snd_pcm_substream *substream = rule->private;
2193 t.min = 0;
2194 t.max = substream->buffer_bytes_max;
2195 t.openmin = 0;
2196 t.openmax = 0;
2197 t.integer = 1;
2198 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2199 }
2200
2201 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2202 {
2203 struct snd_pcm_runtime *runtime = substream->runtime;
2204 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2205 int k, err;
2206
2207 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2208 snd_mask_any(constrs_mask(constrs, k));
2209 }
2210
2211 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2212 snd_interval_any(constrs_interval(constrs, k));
2213 }
2214
2215 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2216 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2217 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2218 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2219 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2220
2221 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2222 snd_pcm_hw_rule_format, NULL,
2223 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2224 if (err < 0)
2225 return err;
2226 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2227 snd_pcm_hw_rule_sample_bits, NULL,
2228 SNDRV_PCM_HW_PARAM_FORMAT,
2229 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2230 if (err < 0)
2231 return err;
2232 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2233 snd_pcm_hw_rule_div, NULL,
2234 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2235 if (err < 0)
2236 return err;
2237 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2238 snd_pcm_hw_rule_mul, NULL,
2239 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2240 if (err < 0)
2241 return err;
2242 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2243 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2244 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2245 if (err < 0)
2246 return err;
2247 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2248 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2249 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2250 if (err < 0)
2251 return err;
2252 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2253 snd_pcm_hw_rule_div, NULL,
2254 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2255 if (err < 0)
2256 return err;
2257 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2258 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2259 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2260 if (err < 0)
2261 return err;
2262 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2263 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2264 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2265 if (err < 0)
2266 return err;
2267 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2268 snd_pcm_hw_rule_div, NULL,
2269 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2270 if (err < 0)
2271 return err;
2272 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2273 snd_pcm_hw_rule_div, NULL,
2274 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2275 if (err < 0)
2276 return err;
2277 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2278 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2279 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2280 if (err < 0)
2281 return err;
2282 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2283 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2284 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2285 if (err < 0)
2286 return err;
2287 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2288 snd_pcm_hw_rule_mul, NULL,
2289 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2290 if (err < 0)
2291 return err;
2292 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2293 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2294 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2295 if (err < 0)
2296 return err;
2297 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2298 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2299 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2300 if (err < 0)
2301 return err;
2302 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2303 snd_pcm_hw_rule_muldivk, (void*) 8,
2304 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2305 if (err < 0)
2306 return err;
2307 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2308 snd_pcm_hw_rule_muldivk, (void*) 8,
2309 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2310 if (err < 0)
2311 return err;
2312 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2313 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2314 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2315 if (err < 0)
2316 return err;
2317 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2318 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2319 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2320 if (err < 0)
2321 return err;
2322 return 0;
2323 }
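
/*
 * Drivers can hook additional, hardware-specific rules into the same
 * machinery via snd_pcm_hw_rule_add().  For instance, roughly (a sketch only;
 * my_channels_rule is a hypothetical driver callback, not part of this file):
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  my_channels_rule, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */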
2324
2325 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2326 {
2327 struct snd_pcm_runtime *runtime = substream->runtime;
2328 struct snd_pcm_hardware *hw = &runtime->hw;
2329 int err;
2330 unsigned int mask = 0;
2331
2332 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2333 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2334 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2335 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2336 if (hw_support_mmap(substream)) {
2337 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2338 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2339 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2340 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2341 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2342 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2343 }
2344 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2345 if (err < 0)
2346 return err;
2347
2348 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2349 if (err < 0)
2350 return err;
2351
2352 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2353 if (err < 0)
2354 return err;
2355
2356 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2357 hw->channels_min, hw->channels_max);
2358 if (err < 0)
2359 return err;
2360
2361 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2362 hw->rate_min, hw->rate_max);
2363 if (err < 0)
2364 return err;
2365
2366 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2367 hw->period_bytes_min, hw->period_bytes_max);
2368 if (err < 0)
2369 return err;
2370
2371 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2372 hw->periods_min, hw->periods_max);
2373 if (err < 0)
2374 return err;
2375
2376 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2377 hw->period_bytes_min, hw->buffer_bytes_max);
2378 if (err < 0)
2379 return err;
2380
2381 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2382 snd_pcm_hw_rule_buffer_bytes_max, substream,
2383 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2384 if (err < 0)
2385 return err;
2386
2387 /* FIXME: remove */
2388 if (runtime->dma_bytes) {
2389 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2390 if (err < 0)
2391 return err;
2392 }
2393
2394 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2395 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2396 snd_pcm_hw_rule_rate, hw,
2397 SNDRV_PCM_HW_PARAM_RATE, -1);
2398 if (err < 0)
2399 return err;
2400 }
2401
2402 /* FIXME: this belongs to the lowlevel driver */
2403 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2404
2405 return 0;
2406 }
2407
2408 static void pcm_release_private(struct snd_pcm_substream *substream)
2409 {
2410 if (snd_pcm_stream_linked(substream))
2411 snd_pcm_unlink(substream);
2412 }
2413
2414 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2415 {
2416 substream->ref_count--;
2417 if (substream->ref_count > 0)
2418 return;
2419
2420 snd_pcm_drop(substream);
2421 if (substream->hw_opened) {
2422 if (substream->ops->hw_free &&
2423 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2424 substream->ops->hw_free(substream);
2425 substream->ops->close(substream);
2426 substream->hw_opened = 0;
2427 }
2428 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2429 pm_qos_remove_request(&substream->latency_pm_qos_req);
2430 if (substream->pcm_release) {
2431 substream->pcm_release(substream);
2432 substream->pcm_release = NULL;
2433 }
2434 snd_pcm_detach_substream(substream);
2435 }
2436 EXPORT_SYMBOL(snd_pcm_release_substream);
2437
2438 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2439 struct file *file,
2440 struct snd_pcm_substream **rsubstream)
2441 {
2442 struct snd_pcm_substream *substream;
2443 int err;
2444
2445 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2446 if (err < 0)
2447 return err;
2448 if (substream->ref_count > 1) {
2449 *rsubstream = substream;
2450 return 0;
2451 }
2452
2453 err = snd_pcm_hw_constraints_init(substream);
2454 if (err < 0) {
2455 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2456 goto error;
2457 }
2458
2459 if ((err = substream->ops->open(substream)) < 0)
2460 goto error;
2461
2462 substream->hw_opened = 1;
2463
2464 err = snd_pcm_hw_constraints_complete(substream);
2465 if (err < 0) {
2466 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2467 goto error;
2468 }
2469
2470 *rsubstream = substream;
2471 return 0;
2472
2473 error:
2474 snd_pcm_release_substream(substream);
2475 return err;
2476 }
2477 EXPORT_SYMBOL(snd_pcm_open_substream);
2478
2479 static int snd_pcm_open_file(struct file *file,
2480 struct snd_pcm *pcm,
2481 int stream)
2482 {
2483 struct snd_pcm_file *pcm_file;
2484 struct snd_pcm_substream *substream;
2485 int err;
2486
2487 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2488 if (err < 0)
2489 return err;
2490
2491 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2492 if (pcm_file == NULL) {
2493 snd_pcm_release_substream(substream);
2494 return -ENOMEM;
2495 }
2496 pcm_file->substream = substream;
2497 if (substream->ref_count == 1)
2498 substream->pcm_release = pcm_release_private;
2499 file->private_data = pcm_file;
2500
2501 return 0;
2502 }
2503
2504 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2505 {
2506 struct snd_pcm *pcm;
2507 int err = nonseekable_open(inode, file);
2508 if (err < 0)
2509 return err;
2510 pcm = snd_lookup_minor_data(iminor(inode),
2511 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2512 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2513 if (pcm)
2514 snd_card_unref(pcm->card);
2515 return err;
2516 }
2517
2518 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2519 {
2520 struct snd_pcm *pcm;
2521 int err = nonseekable_open(inode, file);
2522 if (err < 0)
2523 return err;
2524 pcm = snd_lookup_minor_data(iminor(inode),
2525 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2526 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2527 if (pcm)
2528 snd_card_unref(pcm->card);
2529 return err;
2530 }
2531
2532 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2533 {
2534 int err;
2535 wait_queue_entry_t wait;
2536
2537 if (pcm == NULL) {
2538 err = -ENODEV;
2539 goto __error1;
2540 }
2541 err = snd_card_file_add(pcm->card, file);
2542 if (err < 0)
2543 goto __error1;
2544 if (!try_module_get(pcm->card->module)) {
2545 err = -EFAULT;
2546 goto __error2;
2547 }
2548 init_waitqueue_entry(&wait, current);
2549 add_wait_queue(&pcm->open_wait, &wait);
2550 mutex_lock(&pcm->open_mutex);
2551 while (1) {
2552 err = snd_pcm_open_file(file, pcm, stream);
2553 if (err >= 0)
2554 break;
2555 if (err == -EAGAIN) {
2556 if (file->f_flags & O_NONBLOCK) {
2557 err = -EBUSY;
2558 break;
2559 }
2560 } else
2561 break;
2562 set_current_state(TASK_INTERRUPTIBLE);
2563 mutex_unlock(&pcm->open_mutex);
2564 schedule();
2565 mutex_lock(&pcm->open_mutex);
2566 if (pcm->card->shutdown) {
2567 err = -ENODEV;
2568 break;
2569 }
2570 if (signal_pending(current)) {
2571 err = -ERESTARTSYS;
2572 break;
2573 }
2574 }
2575 remove_wait_queue(&pcm->open_wait, &wait);
2576 mutex_unlock(&pcm->open_mutex);
2577 if (err < 0)
2578 goto __error;
2579 return err;
2580
2581 __error:
2582 module_put(pcm->card->module);
2583 __error2:
2584 snd_card_file_remove(pcm->card, file);
2585 __error1:
2586 return err;
2587 }
2588
2589 static int snd_pcm_release(struct inode *inode, struct file *file)
2590 {
2591 struct snd_pcm *pcm;
2592 struct snd_pcm_substream *substream;
2593 struct snd_pcm_file *pcm_file;
2594
2595 pcm_file = file->private_data;
2596 substream = pcm_file->substream;
2597 if (snd_BUG_ON(!substream))
2598 return -ENXIO;
2599 pcm = substream->pcm;
2600 mutex_lock(&pcm->open_mutex);
2601 snd_pcm_release_substream(substream);
2602 kfree(pcm_file);
2603 mutex_unlock(&pcm->open_mutex);
2604 wake_up(&pcm->open_wait);
2605 module_put(pcm->card->module);
2606 snd_card_file_remove(pcm->card, file);
2607 return 0;
2608 }
2609
2610 /* check and update the PCM state; returns 0 or a negative error.
2611 * Call this with the PCM stream lock held.
2612 */
2613 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2614 {
2615 switch (substream->runtime->status->state) {
2616 case SNDRV_PCM_STATE_DRAINING:
2617 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2618 return -EBADFD;
2619 /* Fall through */
2620 case SNDRV_PCM_STATE_RUNNING:
2621 return snd_pcm_update_hw_ptr(substream);
2622 case SNDRV_PCM_STATE_PREPARED:
2623 case SNDRV_PCM_STATE_PAUSED:
2624 return 0;
2625 case SNDRV_PCM_STATE_SUSPENDED:
2626 return -ESTRPIPE;
2627 case SNDRV_PCM_STATE_XRUN:
2628 return -EPIPE;
2629 default:
2630 return -EBADFD;
2631 }
2632 }
2633
2634 /* increase the appl_ptr; returns the processed frames or a negative error */
2635 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2636 snd_pcm_uframes_t frames,
2637 snd_pcm_sframes_t avail)
2638 {
2639 struct snd_pcm_runtime *runtime = substream->runtime;
2640 snd_pcm_sframes_t appl_ptr;
2641 int ret;
2642
2643 if (avail <= 0)
2644 return 0;
2645 if (frames > (snd_pcm_uframes_t)avail)
2646 frames = avail;
2647 appl_ptr = runtime->control->appl_ptr + frames;
2648 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2649 appl_ptr -= runtime->boundary;
2650 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2651 return ret < 0 ? ret : frames;
2652 }
2653
2654 /* decrease the appl_ptr; returns the processed frames or zero for error */
2655 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2656 snd_pcm_uframes_t frames,
2657 snd_pcm_sframes_t avail)
2658 {
2659 struct snd_pcm_runtime *runtime = substream->runtime;
2660 snd_pcm_sframes_t appl_ptr;
2661 int ret;
2662
2663 if (avail <= 0)
2664 return 0;
2665 if (frames > (snd_pcm_uframes_t)avail)
2666 frames = avail;
2667 appl_ptr = runtime->control->appl_ptr - frames;
2668 if (appl_ptr < 0)
2669 appl_ptr += runtime->boundary;
2670 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2671 /* NOTE: we return zero for errors because PulseAudio gets depressed
2672 * upon receiving an error from the rewind ioctl and stops processing
2673 * altogether. Returning zero means that no rewind was done, so
2674 * it's not strictly wrong to answer like that.
2675 */
2676 return ret < 0 ? 0 : frames;
2677 }
2678
2679 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2680 snd_pcm_uframes_t frames)
2681 {
2682 snd_pcm_sframes_t ret;
2683
2684 if (frames == 0)
2685 return 0;
2686
2687 snd_pcm_stream_lock_irq(substream);
2688 ret = do_pcm_hwsync(substream);
2689 if (!ret)
2690 ret = rewind_appl_ptr(substream, frames,
2691 snd_pcm_hw_avail(substream));
2692 snd_pcm_stream_unlock_irq(substream);
2693 return ret;
2694 }
2695
2696 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2697 snd_pcm_uframes_t frames)
2698 {
2699 snd_pcm_sframes_t ret;
2700
2701 if (frames == 0)
2702 return 0;
2703
2704 snd_pcm_stream_lock_irq(substream);
2705 ret = do_pcm_hwsync(substream);
2706 if (!ret)
2707 ret = forward_appl_ptr(substream, frames,
2708 snd_pcm_avail(substream));
2709 snd_pcm_stream_unlock_irq(substream);
2710 return ret;
2711 }
2712
2713 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2714 {
2715 int err;
2716
2717 snd_pcm_stream_lock_irq(substream);
2718 err = do_pcm_hwsync(substream);
2719 snd_pcm_stream_unlock_irq(substream);
2720 return err;
2721 }
2722
2723 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2724 snd_pcm_sframes_t *delay)
2725 {
2726 int err;
2727 snd_pcm_sframes_t n = 0;
2728
2729 snd_pcm_stream_lock_irq(substream);
2730 err = do_pcm_hwsync(substream);
2731 if (!err)
2732 n = snd_pcm_calc_delay(substream);
2733 snd_pcm_stream_unlock_irq(substream);
2734 if (!err)
2735 *delay = n;
2736 return err;
2737 }
2738
2739 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2740 struct snd_pcm_sync_ptr __user *_sync_ptr)
2741 {
2742 struct snd_pcm_runtime *runtime = substream->runtime;
2743 struct snd_pcm_sync_ptr sync_ptr;
2744 volatile struct snd_pcm_mmap_status *status;
2745 volatile struct snd_pcm_mmap_control *control;
2746 int err;
2747
2748 memset(&sync_ptr, 0, sizeof(sync_ptr));
2749 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2750 return -EFAULT;
2751 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2752 return -EFAULT;
2753 status = runtime->status;
2754 control = runtime->control;
2755 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2756 err = snd_pcm_hwsync(substream);
2757 if (err < 0)
2758 return err;
2759 }
2760 snd_pcm_stream_lock_irq(substream);
2761 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2762 err = pcm_lib_apply_appl_ptr(substream,
2763 sync_ptr.c.control.appl_ptr);
2764 if (err < 0) {
2765 snd_pcm_stream_unlock_irq(substream);
2766 return err;
2767 }
2768 } else {
2769 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2770 }
2771 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2772 control->avail_min = sync_ptr.c.control.avail_min;
2773 else
2774 sync_ptr.c.control.avail_min = control->avail_min;
2775 sync_ptr.s.status.state = status->state;
2776 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2777 sync_ptr.s.status.tstamp = status->tstamp;
2778 sync_ptr.s.status.suspended_state = status->suspended_state;
2779 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2780 snd_pcm_stream_unlock_irq(substream);
2781 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2782 return -EFAULT;
2783 return 0;
2784 }
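
/*
 * Without the status/control mmap, user space keeps the pointers in sync
 * through this ioctl.  A rough sketch of the caller side (fd, my_appl_ptr and
 * period_size are placeholders, not names from this file):
 *
 *	struct snd_pcm_sync_ptr sp = { .flags = SNDRV_PCM_SYNC_PTR_HWSYNC };
 *
 *	sp.c.control.appl_ptr = my_appl_ptr;	// application's own frame count
 *	sp.c.control.avail_min = period_size;
 *	ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);
 *	// sp.s.status.state and sp.s.status.hw_ptr are now up to date
 */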
2785
2786 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2787 {
2788 struct snd_pcm_runtime *runtime = substream->runtime;
2789 int arg;
2790
2791 if (get_user(arg, _arg))
2792 return -EFAULT;
2793 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2794 return -EINVAL;
2795 runtime->tstamp_type = arg;
2796 return 0;
2797 }
2798
2799 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2800 struct snd_xferi __user *_xferi)
2801 {
2802 struct snd_xferi xferi;
2803 struct snd_pcm_runtime *runtime = substream->runtime;
2804 snd_pcm_sframes_t result;
2805
2806 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2807 return -EBADFD;
2808 if (put_user(0, &_xferi->result))
2809 return -EFAULT;
2810 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2811 return -EFAULT;
2812 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2813 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2814 else
2815 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2816 __put_user(result, &_xferi->result);
2817 return result < 0 ? result : 0;
2818 }
2819
2820 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2821 struct snd_xfern __user *_xfern)
2822 {
2823 struct snd_xfern xfern;
2824 struct snd_pcm_runtime *runtime = substream->runtime;
2825 void *bufs;
2826 snd_pcm_sframes_t result;
2827
2828 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2829 return -EBADFD;
2830 if (runtime->channels > 128)
2831 return -EINVAL;
2832 if (put_user(0, &_xfern->result))
2833 return -EFAULT;
2834 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2835 return -EFAULT;
2836
2837 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2838 if (IS_ERR(bufs))
2839 return PTR_ERR(bufs);
2840 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2841 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2842 else
2843 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2844 kfree(bufs);
2845 __put_user(result, &_xfern->result);
2846 return result < 0 ? result : 0;
2847 }
2848
2849 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2850 snd_pcm_uframes_t __user *_frames)
2851 {
2852 snd_pcm_uframes_t frames;
2853 snd_pcm_sframes_t result;
2854
2855 if (get_user(frames, _frames))
2856 return -EFAULT;
2857 if (put_user(0, _frames))
2858 return -EFAULT;
2859 result = snd_pcm_rewind(substream, frames);
2860 __put_user(result, _frames);
2861 return result < 0 ? result : 0;
2862 }
2863
2864 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2865 snd_pcm_uframes_t __user *_frames)
2866 {
2867 snd_pcm_uframes_t frames;
2868 snd_pcm_sframes_t result;
2869
2870 if (get_user(frames, _frames))
2871 return -EFAULT;
2872 if (put_user(0, _frames))
2873 return -EFAULT;
2874 result = snd_pcm_forward(substream, frames);
2875 __put_user(result, _frames);
2876 return result < 0 ? result : 0;
2877 }
2878
2879 static int snd_pcm_common_ioctl(struct file *file,
2880 struct snd_pcm_substream *substream,
2881 unsigned int cmd, void __user *arg)
2882 {
2883 struct snd_pcm_file *pcm_file = file->private_data;
2884 int res;
2885
2886 if (PCM_RUNTIME_CHECK(substream))
2887 return -ENXIO;
2888
2889 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2890 if (res < 0)
2891 return res;
2892
2893 switch (cmd) {
2894 case SNDRV_PCM_IOCTL_PVERSION:
2895 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2896 case SNDRV_PCM_IOCTL_INFO:
2897 return snd_pcm_info_user(substream, arg);
2898 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2899 return 0;
2900 case SNDRV_PCM_IOCTL_TTSTAMP:
2901 return snd_pcm_tstamp(substream, arg);
2902 case SNDRV_PCM_IOCTL_USER_PVERSION:
2903 if (get_user(pcm_file->user_pversion,
2904 (unsigned int __user *)arg))
2905 return -EFAULT;
2906 return 0;
2907 case SNDRV_PCM_IOCTL_HW_REFINE:
2908 return snd_pcm_hw_refine_user(substream, arg);
2909 case SNDRV_PCM_IOCTL_HW_PARAMS:
2910 return snd_pcm_hw_params_user(substream, arg);
2911 case SNDRV_PCM_IOCTL_HW_FREE:
2912 return snd_pcm_hw_free(substream);
2913 case SNDRV_PCM_IOCTL_SW_PARAMS:
2914 return snd_pcm_sw_params_user(substream, arg);
2915 case SNDRV_PCM_IOCTL_STATUS:
2916 return snd_pcm_status_user(substream, arg, false);
2917 case SNDRV_PCM_IOCTL_STATUS_EXT:
2918 return snd_pcm_status_user(substream, arg, true);
2919 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2920 return snd_pcm_channel_info_user(substream, arg);
2921 case SNDRV_PCM_IOCTL_PREPARE:
2922 return snd_pcm_prepare(substream, file);
2923 case SNDRV_PCM_IOCTL_RESET:
2924 return snd_pcm_reset(substream);
2925 case SNDRV_PCM_IOCTL_START:
2926 return snd_pcm_start_lock_irq(substream);
2927 case SNDRV_PCM_IOCTL_LINK:
2928 return snd_pcm_link(substream, (int)(unsigned long) arg);
2929 case SNDRV_PCM_IOCTL_UNLINK:
2930 return snd_pcm_unlink(substream);
2931 case SNDRV_PCM_IOCTL_RESUME:
2932 return snd_pcm_resume(substream);
2933 case SNDRV_PCM_IOCTL_XRUN:
2934 return snd_pcm_xrun(substream);
2935 case SNDRV_PCM_IOCTL_HWSYNC:
2936 return snd_pcm_hwsync(substream);
2937 case SNDRV_PCM_IOCTL_DELAY:
2938 {
2939 snd_pcm_sframes_t delay;
2940 snd_pcm_sframes_t __user *res = arg;
2941 int err;
2942
2943 err = snd_pcm_delay(substream, &delay);
2944 if (err)
2945 return err;
2946 if (put_user(delay, res))
2947 return -EFAULT;
2948 return 0;
2949 }
2950 case SNDRV_PCM_IOCTL_SYNC_PTR:
2951 return snd_pcm_sync_ptr(substream, arg);
2952 #ifdef CONFIG_SND_SUPPORT_OLD_API
2953 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
2954 return snd_pcm_hw_refine_old_user(substream, arg);
2955 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
2956 return snd_pcm_hw_params_old_user(substream, arg);
2957 #endif
2958 case SNDRV_PCM_IOCTL_DRAIN:
2959 return snd_pcm_drain(substream, file);
2960 case SNDRV_PCM_IOCTL_DROP:
2961 return snd_pcm_drop(substream);
2962 case SNDRV_PCM_IOCTL_PAUSE:
2963 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
2964 substream,
2965 (int)(unsigned long)arg);
2966 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
2967 case SNDRV_PCM_IOCTL_READI_FRAMES:
2968 return snd_pcm_xferi_frames_ioctl(substream, arg);
2969 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
2970 case SNDRV_PCM_IOCTL_READN_FRAMES:
2971 return snd_pcm_xfern_frames_ioctl(substream, arg);
2972 case SNDRV_PCM_IOCTL_REWIND:
2973 return snd_pcm_rewind_ioctl(substream, arg);
2974 case SNDRV_PCM_IOCTL_FORWARD:
2975 return snd_pcm_forward_ioctl(substream, arg);
2976 }
2977 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
2978 return -ENOTTY;
2979 }
2980
2981 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
2982 unsigned long arg)
2983 {
2984 struct snd_pcm_file *pcm_file;
2985
2986 pcm_file = file->private_data;
2987
2988 if (((cmd >> 8) & 0xff) != 'A')
2989 return -ENOTTY;
2990
2991 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
2992 (void __user *)arg);
2993 }
2994
2995 /**
2996 * snd_pcm_kernel_ioctl - Execute a PCM ioctl in kernel space
2997 * @substream: PCM substream
2998 * @cmd: IOCTL cmd
2999 * @arg: IOCTL argument
3000 *
3001 * The function is provided primarily for the OSS layer and USB gadget drivers,
3002 * and it allows only a limited set of ioctls (hw_params, sw_params,
3003 * prepare, start, drain, drop, forward, delay).
3004 */
3005 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3006 unsigned int cmd, void *arg)
3007 {
3008 snd_pcm_uframes_t *frames = arg;
3009 snd_pcm_sframes_t result;
3010
3011 switch (cmd) {
3012 case SNDRV_PCM_IOCTL_FORWARD:
3013 {
3014 /* provided only for OSS; capture-only and no value returned */
3015 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3016 return -EINVAL;
3017 result = snd_pcm_forward(substream, *frames);
3018 return result < 0 ? result : 0;
3019 }
3020 case SNDRV_PCM_IOCTL_HW_PARAMS:
3021 return snd_pcm_hw_params(substream, arg);
3022 case SNDRV_PCM_IOCTL_SW_PARAMS:
3023 return snd_pcm_sw_params(substream, arg);
3024 case SNDRV_PCM_IOCTL_PREPARE:
3025 return snd_pcm_prepare(substream, NULL);
3026 case SNDRV_PCM_IOCTL_START:
3027 return snd_pcm_start_lock_irq(substream);
3028 case SNDRV_PCM_IOCTL_DRAIN:
3029 return snd_pcm_drain(substream, NULL);
3030 case SNDRV_PCM_IOCTL_DROP:
3031 return snd_pcm_drop(substream);
3032 case SNDRV_PCM_IOCTL_DELAY:
3033 return snd_pcm_delay(substream, frames);
3034 default:
3035 return -EINVAL;
3036 }
3037 }
3038 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
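
/*
 * A typical in-kernel caller (e.g. the OSS emulation layer) drives the PCM
 * through this entry point.  A minimal sketch, assuming the caller already
 * holds a valid substream and a filled-in hw_params:
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
 *	if (err >= 0)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
 */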
3039
3040 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3041 loff_t * offset)
3042 {
3043 struct snd_pcm_file *pcm_file;
3044 struct snd_pcm_substream *substream;
3045 struct snd_pcm_runtime *runtime;
3046 snd_pcm_sframes_t result;
3047
3048 pcm_file = file->private_data;
3049 substream = pcm_file->substream;
3050 if (PCM_RUNTIME_CHECK(substream))
3051 return -ENXIO;
3052 runtime = substream->runtime;
3053 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3054 return -EBADFD;
3055 if (!frame_aligned(runtime, count))
3056 return -EINVAL;
3057 count = bytes_to_frames(runtime, count);
3058 result = snd_pcm_lib_read(substream, buf, count);
3059 if (result > 0)
3060 result = frames_to_bytes(runtime, result);
3061 return result;
3062 }
3063
3064 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3065 size_t count, loff_t * offset)
3066 {
3067 struct snd_pcm_file *pcm_file;
3068 struct snd_pcm_substream *substream;
3069 struct snd_pcm_runtime *runtime;
3070 snd_pcm_sframes_t result;
3071
3072 pcm_file = file->private_data;
3073 substream = pcm_file->substream;
3074 if (PCM_RUNTIME_CHECK(substream))
3075 return -ENXIO;
3076 runtime = substream->runtime;
3077 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3078 return -EBADFD;
3079 if (!frame_aligned(runtime, count))
3080 return -EINVAL;
3081 count = bytes_to_frames(runtime, count);
3082 result = snd_pcm_lib_write(substream, buf, count);
3083 if (result > 0)
3084 result = frames_to_bytes(runtime, result);
3085 return result;
3086 }
3087
3088 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3089 {
3090 struct snd_pcm_file *pcm_file;
3091 struct snd_pcm_substream *substream;
3092 struct snd_pcm_runtime *runtime;
3093 snd_pcm_sframes_t result;
3094 unsigned long i;
3095 void __user **bufs;
3096 snd_pcm_uframes_t frames;
3097
3098 pcm_file = iocb->ki_filp->private_data;
3099 substream = pcm_file->substream;
3100 if (PCM_RUNTIME_CHECK(substream))
3101 return -ENXIO;
3102 runtime = substream->runtime;
3103 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3104 return -EBADFD;
3105 if (!iter_is_iovec(to))
3106 return -EINVAL;
3107 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3108 return -EINVAL;
3109 if (!frame_aligned(runtime, to->iov->iov_len))
3110 return -EINVAL;
3111 frames = bytes_to_samples(runtime, to->iov->iov_len);
3112 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3113 if (bufs == NULL)
3114 return -ENOMEM;
3115 for (i = 0; i < to->nr_segs; ++i)
3116 bufs[i] = to->iov[i].iov_base;
3117 result = snd_pcm_lib_readv(substream, bufs, frames);
3118 if (result > 0)
3119 result = frames_to_bytes(runtime, result);
3120 kfree(bufs);
3121 return result;
3122 }
3123
3124 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3125 {
3126 struct snd_pcm_file *pcm_file;
3127 struct snd_pcm_substream *substream;
3128 struct snd_pcm_runtime *runtime;
3129 snd_pcm_sframes_t result;
3130 unsigned long i;
3131 void __user **bufs;
3132 snd_pcm_uframes_t frames;
3133
3134 pcm_file = iocb->ki_filp->private_data;
3135 substream = pcm_file->substream;
3136 if (PCM_RUNTIME_CHECK(substream))
3137 return -ENXIO;
3138 runtime = substream->runtime;
3139 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3140 return -EBADFD;
3141 if (!iter_is_iovec(from))
3142 return -EINVAL;
3143 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3144 !frame_aligned(runtime, from->iov->iov_len))
3145 return -EINVAL;
3146 frames = bytes_to_samples(runtime, from->iov->iov_len);
3147 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3148 if (bufs == NULL)
3149 return -ENOMEM;
3150 for (i = 0; i < from->nr_segs; ++i)
3151 bufs[i] = from->iov[i].iov_base;
3152 result = snd_pcm_lib_writev(substream, bufs, frames);
3153 if (result > 0)
3154 result = frames_to_bytes(runtime, result);
3155 kfree(bufs);
3156 return result;
3157 }
3158
3159 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3160 {
3161 struct snd_pcm_file *pcm_file;
3162 struct snd_pcm_substream *substream;
3163 struct snd_pcm_runtime *runtime;
3164 __poll_t mask, ok;
3165 snd_pcm_uframes_t avail;
3166
3167 pcm_file = file->private_data;
3168
3169 substream = pcm_file->substream;
3170 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3171 ok = EPOLLOUT | EPOLLWRNORM;
3172 else
3173 ok = EPOLLIN | EPOLLRDNORM;
3174 if (PCM_RUNTIME_CHECK(substream))
3175 return ok | EPOLLERR;
3176
3177 runtime = substream->runtime;
3178 poll_wait(file, &runtime->sleep, wait);
3179
3180 mask = 0;
3181 snd_pcm_stream_lock_irq(substream);
3182 avail = snd_pcm_avail(substream);
3183 switch (runtime->status->state) {
3184 case SNDRV_PCM_STATE_RUNNING:
3185 case SNDRV_PCM_STATE_PREPARED:
3186 case SNDRV_PCM_STATE_PAUSED:
3187 if (avail >= runtime->control->avail_min)
3188 mask = ok;
3189 break;
3190 case SNDRV_PCM_STATE_DRAINING:
3191 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3192 mask = ok;
3193 if (!avail)
3194 mask |= EPOLLERR;
3195 }
3196 break;
3197 default:
3198 mask = ok | EPOLLERR;
3199 break;
3200 }
3201 snd_pcm_stream_unlock_irq(substream);
3202 return mask;
3203 }
3204
3205 /*
3206 * mmap support
3207 */
3208
3209 /*
3210 * Only on coherent architectures can we mmap the status and the control records
3211 * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3212 */
3213 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3214 /*
3215 * mmap status record
3216 */
3217 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3218 {
3219 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3220 struct snd_pcm_runtime *runtime;
3221
3222 if (substream == NULL)
3223 return VM_FAULT_SIGBUS;
3224 runtime = substream->runtime;
3225 vmf->page = virt_to_page(runtime->status);
3226 get_page(vmf->page);
3227 return 0;
3228 }
3229
3230 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3231 {
3232 .fault = snd_pcm_mmap_status_fault,
3233 };
3234
3235 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3236 struct vm_area_struct *area)
3237 {
3238 long size;
3239 if (!(area->vm_flags & VM_READ))
3240 return -EINVAL;
3241 size = area->vm_end - area->vm_start;
3242 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3243 return -EINVAL;
3244 area->vm_ops = &snd_pcm_vm_ops_status;
3245 area->vm_private_data = substream;
3246 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3247 return 0;
3248 }
3249
3250 /*
3251 * mmap control record
3252 */
3253 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3254 {
3255 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3256 struct snd_pcm_runtime *runtime;
3257
3258 if (substream == NULL)
3259 return VM_FAULT_SIGBUS;
3260 runtime = substream->runtime;
3261 vmf->page = virt_to_page(runtime->control);
3262 get_page(vmf->page);
3263 return 0;
3264 }
3265
3266 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3267 {
3268 .fault = snd_pcm_mmap_control_fault,
3269 };
3270
3271 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3272 struct vm_area_struct *area)
3273 {
3274 long size;
3275 if (!(area->vm_flags & VM_READ))
3276 return -EINVAL;
3277 size = area->vm_end - area->vm_start;
3278 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3279 return -EINVAL;
3280 area->vm_ops = &snd_pcm_vm_ops_control;
3281 area->vm_private_data = substream;
3282 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3283 return 0;
3284 }
3285
3286 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3287 {
3288 if (pcm_file->no_compat_mmap)
3289 return false;
3290 /* See pcm_control_mmap_allowed() below.
3291 * Since older alsa-lib requires both status and control mmaps to be
3292 * coupled, we have to disable the status mmap for old alsa-lib, too.
3293 */
3294 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3295 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3296 return false;
3297 return true;
3298 }
3299
3300 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3301 {
3302 if (pcm_file->no_compat_mmap)
3303 return false;
3304 /* Disallow the control mmap when SYNC_APPLPTR flag is set;
3305 * it forces user-space to fall back to snd_pcm_sync_ptr(),
3306 * thus effectively ensuring the manual update of appl_ptr.
3307 */
3308 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3309 return false;
3310 return true;
3311 }
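
/*
 * Drivers that must observe every appl_ptr update (e.g. to program a hardware
 * pointer) therefore advertise the flag in their snd_pcm_hardware template,
 * roughly like:
 *
 *	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 *		SNDRV_PCM_INFO_SYNC_APPLPTR,
 *
 * which makes pcm_control_mmap_allowed() return false and routes all appl_ptr
 * updates through SNDRV_PCM_IOCTL_SYNC_PTR above.
 */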
3312
3313 #else /* ! coherent mmap */
3314 /*
3315 * don't support mmap for status and control records.
3316 */
3317 #define pcm_status_mmap_allowed(pcm_file) false
3318 #define pcm_control_mmap_allowed(pcm_file) false
3319
3320 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3321 struct vm_area_struct *area)
3322 {
3323 return -ENXIO;
3324 }
3325 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3326 struct vm_area_struct *area)
3327 {
3328 return -ENXIO;
3329 }
3330 #endif /* coherent mmap */
3331
3332 static inline struct page *
3333 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3334 {
3335 void *vaddr = substream->runtime->dma_area + ofs;
3336 return virt_to_page(vaddr);
3337 }
3338
3339 /*
3340 * fault callback for mmapping a RAM page
3341 */
3342 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3343 {
3344 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3345 struct snd_pcm_runtime *runtime;
3346 unsigned long offset;
3347 struct page * page;
3348 size_t dma_bytes;
3349
3350 if (substream == NULL)
3351 return VM_FAULT_SIGBUS;
3352 runtime = substream->runtime;
3353 offset = vmf->pgoff << PAGE_SHIFT;
3354 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3355 if (offset > dma_bytes - PAGE_SIZE)
3356 return VM_FAULT_SIGBUS;
3357 if (substream->ops->page)
3358 page = substream->ops->page(substream, offset);
3359 else
3360 page = snd_pcm_default_page_ops(substream, offset);
3361 if (!page)
3362 return VM_FAULT_SIGBUS;
3363 get_page(page);
3364 vmf->page = page;
3365 return 0;
3366 }
3367
3368 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3369 .open = snd_pcm_mmap_data_open,
3370 .close = snd_pcm_mmap_data_close,
3371 };
3372
3373 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3374 .open = snd_pcm_mmap_data_open,
3375 .close = snd_pcm_mmap_data_close,
3376 .fault = snd_pcm_mmap_data_fault,
3377 };
3378
3379 /*
3380 * mmap the DMA buffer on RAM
3381 */
3382
3383 /**
3384 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3385 * @substream: PCM substream
3386 * @area: VMA
3387 *
3388 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3389 * this function is invoked implicitly.
3390 */
3391 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3392 struct vm_area_struct *area)
3393 {
3394 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3395 #ifdef CONFIG_GENERIC_ALLOCATOR
3396 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3397 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3398 return remap_pfn_range(area, area->vm_start,
3399 substream->dma_buffer.addr >> PAGE_SHIFT,
3400 area->vm_end - area->vm_start, area->vm_page_prot);
3401 }
3402 #endif /* CONFIG_GENERIC_ALLOCATOR */
3403 #ifndef CONFIG_X86 /* to avoid warnings from arch/x86/mm/pat.c */
3404 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3405 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
3406 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3407 area,
3408 substream->runtime->dma_area,
3409 substream->runtime->dma_addr,
3410 substream->runtime->dma_bytes);
3411 #endif /* CONFIG_X86 */
3412 /* mmap with fault handler */
3413 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3414 return 0;
3415 }
3416 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3417
3418 /*
3419 * mmap the DMA buffer on I/O memory area
3420 */
3421 #if SNDRV_PCM_INFO_MMAP_IOMEM
3422 /**
3423 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3424 * @substream: PCM substream
3425 * @area: VMA
3426 *
3427 * When your hardware uses io-mapped pages as the hardware buffer and you
3428 * want to mmap them, pass this function as the mmap pcm_ops. Note that this
3429 * works only on limited architectures.
3430 */
3431 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3432 struct vm_area_struct *area)
3433 {
3434 struct snd_pcm_runtime *runtime = substream->runtime;
3435
3436 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3437 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3438 }
3439 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
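
/*
 * A driver with an io-mapped ring buffer would simply plug this in as its
 * mmap callback, e.g. (a sketch; my_pcm_ops is a made-up name):
 *
 *	static const struct snd_pcm_ops my_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_mmap_iomem,
 *	};
 */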
3440 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3441
3442 /*
3443 * mmap DMA buffer
3444 */
3445 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3446 struct vm_area_struct *area)
3447 {
3448 struct snd_pcm_runtime *runtime;
3449 long size;
3450 unsigned long offset;
3451 size_t dma_bytes;
3452 int err;
3453
3454 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3455 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3456 return -EINVAL;
3457 } else {
3458 if (!(area->vm_flags & VM_READ))
3459 return -EINVAL;
3460 }
3461 runtime = substream->runtime;
3462 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3463 return -EBADFD;
3464 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3465 return -ENXIO;
3466 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3467 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3468 return -EINVAL;
3469 size = area->vm_end - area->vm_start;
3470 offset = area->vm_pgoff << PAGE_SHIFT;
3471 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3472 if ((size_t)size > dma_bytes)
3473 return -EINVAL;
3474 if (offset > dma_bytes - size)
3475 return -EINVAL;
3476
3477 area->vm_ops = &snd_pcm_vm_ops_data;
3478 area->vm_private_data = substream;
3479 if (substream->ops->mmap)
3480 err = substream->ops->mmap(substream, area);
3481 else
3482 err = snd_pcm_lib_default_mmap(substream, area);
3483 if (!err)
3484 atomic_inc(&substream->mmap_count);
3485 return err;
3486 }
3487 EXPORT_SYMBOL(snd_pcm_mmap_data);
3488
3489 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3490 {
3491 struct snd_pcm_file * pcm_file;
3492 struct snd_pcm_substream *substream;
3493 unsigned long offset;
3494
3495 pcm_file = file->private_data;
3496 substream = pcm_file->substream;
3497 if (PCM_RUNTIME_CHECK(substream))
3498 return -ENXIO;
3499
3500 offset = area->vm_pgoff << PAGE_SHIFT;
3501 switch (offset) {
3502 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3503 if (!pcm_status_mmap_allowed(pcm_file))
3504 return -ENXIO;
3505 return snd_pcm_mmap_status(substream, file, area);
3506 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3507 if (!pcm_control_mmap_allowed(pcm_file))
3508 return -ENXIO;
3509 return snd_pcm_mmap_control(substream, file, area);
3510 default:
3511 return snd_pcm_mmap_data(substream, file, area);
3512 }
3513 return 0;
3514 }
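
/*
 * The three mmap offsets dispatched above are what user space (e.g. alsa-lib)
 * uses to map the status and control records and the DMA buffer itself.  A
 * rough user-space sketch; fd, page_size and buffer_bytes are placeholders:
 *
 *	status  = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *		       fd, SNDRV_PCM_MMAP_OFFSET_STATUS);
 *	control = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);
 *	buffer  = mmap(NULL, buffer_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, SNDRV_PCM_MMAP_OFFSET_DATA);
 */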
3515
3516 static int snd_pcm_fasync(int fd, struct file * file, int on)
3517 {
3518 struct snd_pcm_file * pcm_file;
3519 struct snd_pcm_substream *substream;
3520 struct snd_pcm_runtime *runtime;
3521
3522 pcm_file = file->private_data;
3523 substream = pcm_file->substream;
3524 if (PCM_RUNTIME_CHECK(substream))
3525 return -ENXIO;
3526 runtime = substream->runtime;
3527 return fasync_helper(fd, file, on, &runtime->fasync);
3528 }
3529
3530 /*
3531 * ioctl32 compat
3532 */
3533 #ifdef CONFIG_COMPAT
3534 #include "pcm_compat.c"
3535 #else
3536 #define snd_pcm_ioctl_compat NULL
3537 #endif
3538
3539 /*
3540 * To be removed helpers to keep binary compatibility
3541 */
3542
3543 #ifdef CONFIG_SND_SUPPORT_OLD_API
3544 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3545 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
3546
3547 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3548 struct snd_pcm_hw_params_old *oparams)
3549 {
3550 unsigned int i;
3551
3552 memset(params, 0, sizeof(*params));
3553 params->flags = oparams->flags;
3554 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3555 params->masks[i].bits[0] = oparams->masks[i];
3556 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3557 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3558 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3559 params->info = oparams->info;
3560 params->msbits = oparams->msbits;
3561 params->rate_num = oparams->rate_num;
3562 params->rate_den = oparams->rate_den;
3563 params->fifo_size = oparams->fifo_size;
3564 }
3565
3566 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3567 struct snd_pcm_hw_params *params)
3568 {
3569 unsigned int i;
3570
3571 memset(oparams, 0, sizeof(*oparams));
3572 oparams->flags = params->flags;
3573 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3574 oparams->masks[i] = params->masks[i].bits[0];
3575 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3576 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3577 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3578 oparams->info = params->info;
3579 oparams->msbits = params->msbits;
3580 oparams->rate_num = params->rate_num;
3581 oparams->rate_den = params->rate_den;
3582 oparams->fifo_size = params->fifo_size;
3583 }
3584
3585 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3586 struct snd_pcm_hw_params_old __user * _oparams)
3587 {
3588 struct snd_pcm_hw_params *params;
3589 struct snd_pcm_hw_params_old *oparams = NULL;
3590 int err;
3591
3592 params = kmalloc(sizeof(*params), GFP_KERNEL);
3593 if (!params)
3594 return -ENOMEM;
3595
3596 oparams = memdup_user(_oparams, sizeof(*oparams));
3597 if (IS_ERR(oparams)) {
3598 err = PTR_ERR(oparams);
3599 goto out;
3600 }
3601 snd_pcm_hw_convert_from_old_params(params, oparams);
3602 err = snd_pcm_hw_refine(substream, params);
3603 if (err < 0)
3604 goto out_old;
3605
3606 err = fixup_unreferenced_params(substream, params);
3607 if (err < 0)
3608 goto out_old;
3609
3610 snd_pcm_hw_convert_to_old_params(oparams, params);
3611 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3612 err = -EFAULT;
3613 out_old:
3614 kfree(oparams);
3615 out:
3616 kfree(params);
3617 return err;
3618 }
3619
3620 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3621 struct snd_pcm_hw_params_old __user * _oparams)
3622 {
3623 struct snd_pcm_hw_params *params;
3624 struct snd_pcm_hw_params_old *oparams = NULL;
3625 int err;
3626
3627 params = kmalloc(sizeof(*params), GFP_KERNEL);
3628 if (!params)
3629 return -ENOMEM;
3630
3631 oparams = memdup_user(_oparams, sizeof(*oparams));
3632 if (IS_ERR(oparams)) {
3633 err = PTR_ERR(oparams);
3634 goto out;
3635 }
3636
3637 snd_pcm_hw_convert_from_old_params(params, oparams);
3638 err = snd_pcm_hw_params(substream, params);
3639 if (err < 0)
3640 goto out_old;
3641
3642 snd_pcm_hw_convert_to_old_params(oparams, params);
3643 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3644 err = -EFAULT;
3645 out_old:
3646 kfree(oparams);
3647 out:
3648 kfree(params);
3649 return err;
3650 }
3651 #endif /* CONFIG_SND_SUPPORT_OLD_API */
3652
3653 #ifndef CONFIG_MMU
3654 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3655 unsigned long addr,
3656 unsigned long len,
3657 unsigned long pgoff,
3658 unsigned long flags)
3659 {
3660 struct snd_pcm_file *pcm_file = file->private_data;
3661 struct snd_pcm_substream *substream = pcm_file->substream;
3662 struct snd_pcm_runtime *runtime = substream->runtime;
3663 unsigned long offset = pgoff << PAGE_SHIFT;
3664
3665 switch (offset) {
3666 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3667 return (unsigned long)runtime->status;
3668 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3669 return (unsigned long)runtime->control;
3670 default:
3671 return (unsigned long)runtime->dma_area + offset;
3672 }
3673 }
3674 #else
3675 # define snd_pcm_get_unmapped_area NULL
3676 #endif
3677
3678 /*
3679 * Register section
3680 */
3681
3682 const struct file_operations snd_pcm_f_ops[2] = {
3683 {
3684 .owner = THIS_MODULE,
3685 .write = snd_pcm_write,
3686 .write_iter = snd_pcm_writev,
3687 .open = snd_pcm_playback_open,
3688 .release = snd_pcm_release,
3689 .llseek = no_llseek,
3690 .poll = snd_pcm_poll,
3691 .unlocked_ioctl = snd_pcm_ioctl,
3692 .compat_ioctl = snd_pcm_ioctl_compat,
3693 .mmap = snd_pcm_mmap,
3694 .fasync = snd_pcm_fasync,
3695 .get_unmapped_area = snd_pcm_get_unmapped_area,
3696 },
3697 {
3698 .owner = THIS_MODULE,
3699 .read = snd_pcm_read,
3700 .read_iter = snd_pcm_readv,
3701 .open = snd_pcm_capture_open,
3702 .release = snd_pcm_release,
3703 .llseek = no_llseek,
3704 .poll = snd_pcm_poll,
3705 .unlocked_ioctl = snd_pcm_ioctl,
3706 .compat_ioctl = snd_pcm_ioctl_compat,
3707 .mmap = snd_pcm_mmap,
3708 .fasync = snd_pcm_fasync,
3709 .get_unmapped_area = snd_pcm_get_unmapped_area,
3710 }
3711 };