2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 #include <linux/init.h>
19 #include <linux/usb.h>
20 #include <linux/usb/audio.h>
22 #include <sound/core.h>
23 #include <sound/pcm.h>
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 *
 * (rate << 13) / 125 == (rate << 16) / 1000, with +62 for rounding
 * to the nearest Q16.16 step.
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}
/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 *
 * (rate << 10) / 125 == (rate << 16) / 8000, with +62 for rounding.
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}
52 static int deactivate_urbs(struct snd_usb_substream
*subs
, int force
, int can_sleep
)
54 struct snd_usb_audio
*chip
= subs
->stream
->chip
;
60 if (!force
&& subs
->stream
->chip
->shutdown
) /* to be sure... */
63 async
= !can_sleep
&& chip
->async_unlink
;
65 if (!async
&& in_interrupt())
68 for (i
= 0; i
< subs
->nurbs
; i
++) {
69 if (test_bit(i
, &subs
->active_mask
)) {
70 if (!test_and_set_bit(i
, &subs
->unlink_mask
)) {
71 struct urb
*u
= subs
->dataurb
[i
].urb
;
80 for (i
= 0; i
< SYNC_URBS
; i
++) {
81 if (test_bit(i
+16, &subs
->active_mask
)) {
82 if (!test_and_set_bit(i
+16, &subs
->unlink_mask
)) {
83 struct urb
*u
= subs
->syncurb
[i
].urb
;
99 static void release_urb_ctx(struct snd_urb_ctx
*u
)
103 usb_buffer_free(u
->subs
->dev
, u
->buffer_size
,
104 u
->urb
->transfer_buffer
,
105 u
->urb
->transfer_dma
);
106 usb_free_urb(u
->urb
);
112 * wait until all urbs are processed.
114 static int wait_clear_urbs(struct snd_usb_substream
*subs
)
116 unsigned long end_time
= jiffies
+ msecs_to_jiffies(1000);
122 for (i
= 0; i
< subs
->nurbs
; i
++) {
123 if (test_bit(i
, &subs
->active_mask
))
126 if (subs
->syncpipe
) {
127 for (i
= 0; i
< SYNC_URBS
; i
++) {
128 if (test_bit(i
+ 16, &subs
->active_mask
))
134 schedule_timeout_uninterruptible(1);
135 } while (time_before(jiffies
, end_time
));
137 snd_printk(KERN_ERR
"timeout: still %d active urbs..\n", alive
);
142 * release a substream
144 void snd_usb_release_substream_urbs(struct snd_usb_substream
*subs
, int force
)
148 /* stop urbs (to be sure) */
149 deactivate_urbs(subs
, force
, 1);
150 wait_clear_urbs(subs
);
152 for (i
= 0; i
< MAX_URBS
; i
++)
153 release_urb_ctx(&subs
->dataurb
[i
]);
154 for (i
= 0; i
< SYNC_URBS
; i
++)
155 release_urb_ctx(&subs
->syncurb
[i
]);
156 usb_buffer_free(subs
->dev
, SYNC_URBS
* 4,
157 subs
->syncbuf
, subs
->sync_dma
);
158 subs
->syncbuf
= NULL
;
163 * complete callback from data urb
165 static void snd_complete_urb(struct urb
*urb
)
167 struct snd_urb_ctx
*ctx
= urb
->context
;
168 struct snd_usb_substream
*subs
= ctx
->subs
;
169 struct snd_pcm_substream
*substream
= ctx
->subs
->pcm_substream
;
172 if ((subs
->running
&& subs
->ops
.retire(subs
, substream
->runtime
, urb
)) ||
173 !subs
->running
|| /* can be stopped during retire callback */
174 (err
= subs
->ops
.prepare(subs
, substream
->runtime
, urb
)) < 0 ||
175 (err
= usb_submit_urb(urb
, GFP_ATOMIC
)) < 0) {
176 clear_bit(ctx
->index
, &subs
->active_mask
);
178 snd_printd(KERN_ERR
"cannot submit urb (err = %d)\n", err
);
179 snd_pcm_stop(substream
, SNDRV_PCM_STATE_XRUN
);
186 * complete callback from sync urb
188 static void snd_complete_sync_urb(struct urb
*urb
)
190 struct snd_urb_ctx
*ctx
= urb
->context
;
191 struct snd_usb_substream
*subs
= ctx
->subs
;
192 struct snd_pcm_substream
*substream
= ctx
->subs
->pcm_substream
;
195 if ((subs
->running
&& subs
->ops
.retire_sync(subs
, substream
->runtime
, urb
)) ||
196 !subs
->running
|| /* can be stopped during retire callback */
197 (err
= subs
->ops
.prepare_sync(subs
, substream
->runtime
, urb
)) < 0 ||
198 (err
= usb_submit_urb(urb
, GFP_ATOMIC
)) < 0) {
199 clear_bit(ctx
->index
+ 16, &subs
->active_mask
);
201 snd_printd(KERN_ERR
"cannot submit sync urb (err = %d)\n", err
);
202 snd_pcm_stop(substream
, SNDRV_PCM_STATE_XRUN
);
209 * initialize a substream for plaback/capture
211 int snd_usb_init_substream_urbs(struct snd_usb_substream
*subs
,
212 unsigned int period_bytes
,
214 unsigned int frame_bits
)
216 unsigned int maxsize
, i
;
217 int is_playback
= subs
->direction
== SNDRV_PCM_STREAM_PLAYBACK
;
218 unsigned int urb_packs
, total_packs
, packs_per_ms
;
219 struct snd_usb_audio
*chip
= subs
->stream
->chip
;
221 /* calculate the frequency in 16.16 format */
222 if (snd_usb_get_speed(subs
->dev
) == USB_SPEED_FULL
)
223 subs
->freqn
= get_usb_full_speed_rate(rate
);
225 subs
->freqn
= get_usb_high_speed_rate(rate
);
226 subs
->freqm
= subs
->freqn
;
227 /* calculate max. frequency */
228 if (subs
->maxpacksize
) {
229 /* whatever fits into a max. size packet */
230 maxsize
= subs
->maxpacksize
;
231 subs
->freqmax
= (maxsize
/ (frame_bits
>> 3))
232 << (16 - subs
->datainterval
);
234 /* no max. packet size: just take 25% higher than nominal */
235 subs
->freqmax
= subs
->freqn
+ (subs
->freqn
>> 2);
236 maxsize
= ((subs
->freqmax
+ 0xffff) * (frame_bits
>> 3))
237 >> (16 - subs
->datainterval
);
242 subs
->curpacksize
= subs
->maxpacksize
;
244 subs
->curpacksize
= maxsize
;
246 if (snd_usb_get_speed(subs
->dev
) == USB_SPEED_HIGH
)
247 packs_per_ms
= 8 >> subs
->datainterval
;
252 urb_packs
= max(chip
->nrpacks
, 1);
253 urb_packs
= min(urb_packs
, (unsigned int)MAX_PACKS
);
256 urb_packs
*= packs_per_ms
;
258 urb_packs
= min(urb_packs
, 1U << subs
->syncinterval
);
260 /* decide how many packets to be used */
262 unsigned int minsize
, maxpacks
;
263 /* determine how small a packet can be */
264 minsize
= (subs
->freqn
>> (16 - subs
->datainterval
))
266 /* with sync from device, assume it can be 12% lower */
268 minsize
-= minsize
>> 3;
269 minsize
= max(minsize
, 1u);
270 total_packs
= (period_bytes
+ minsize
- 1) / minsize
;
271 /* we need at least two URBs for queueing */
272 if (total_packs
< 2) {
275 /* and we don't want too long a queue either */
276 maxpacks
= max(MAX_QUEUE
* packs_per_ms
, urb_packs
* 2);
277 total_packs
= min(total_packs
, maxpacks
);
280 while (urb_packs
> 1 && urb_packs
* maxsize
>= period_bytes
)
282 total_packs
= MAX_URBS
* urb_packs
;
284 subs
->nurbs
= (total_packs
+ urb_packs
- 1) / urb_packs
;
285 if (subs
->nurbs
> MAX_URBS
) {
287 subs
->nurbs
= MAX_URBS
;
288 total_packs
= MAX_URBS
* urb_packs
;
289 } else if (subs
->nurbs
< 2) {
290 /* too little - we need at least two packets
291 * to ensure contiguous playback/capture
296 /* allocate and initialize data urbs */
297 for (i
= 0; i
< subs
->nurbs
; i
++) {
298 struct snd_urb_ctx
*u
= &subs
->dataurb
[i
];
301 u
->packets
= (i
+ 1) * total_packs
/ subs
->nurbs
302 - i
* total_packs
/ subs
->nurbs
;
303 u
->buffer_size
= maxsize
* u
->packets
;
304 if (subs
->fmt_type
== UAC_FORMAT_TYPE_II
)
305 u
->packets
++; /* for transfer delimiter */
306 u
->urb
= usb_alloc_urb(u
->packets
, GFP_KERNEL
);
309 u
->urb
->transfer_buffer
=
310 usb_buffer_alloc(subs
->dev
, u
->buffer_size
, GFP_KERNEL
,
311 &u
->urb
->transfer_dma
);
312 if (!u
->urb
->transfer_buffer
)
314 u
->urb
->pipe
= subs
->datapipe
;
315 u
->urb
->transfer_flags
= URB_ISO_ASAP
| URB_NO_TRANSFER_DMA_MAP
;
316 u
->urb
->interval
= 1 << subs
->datainterval
;
318 u
->urb
->complete
= snd_complete_urb
;
321 if (subs
->syncpipe
) {
322 /* allocate and initialize sync urbs */
323 subs
->syncbuf
= usb_buffer_alloc(subs
->dev
, SYNC_URBS
* 4,
324 GFP_KERNEL
, &subs
->sync_dma
);
327 for (i
= 0; i
< SYNC_URBS
; i
++) {
328 struct snd_urb_ctx
*u
= &subs
->syncurb
[i
];
332 u
->urb
= usb_alloc_urb(1, GFP_KERNEL
);
335 u
->urb
->transfer_buffer
= subs
->syncbuf
+ i
* 4;
336 u
->urb
->transfer_dma
= subs
->sync_dma
+ i
* 4;
337 u
->urb
->transfer_buffer_length
= 4;
338 u
->urb
->pipe
= subs
->syncpipe
;
339 u
->urb
->transfer_flags
= URB_ISO_ASAP
|
340 URB_NO_TRANSFER_DMA_MAP
;
341 u
->urb
->number_of_packets
= 1;
342 u
->urb
->interval
= 1 << subs
->syncinterval
;
344 u
->urb
->complete
= snd_complete_sync_urb
;
350 snd_usb_release_substream_urbs(subs
, 0);
355 * prepare urb for full speed capture sync pipe
357 * fill the length and offset of each urb descriptor.
358 * the fixed 10.14 frequency is passed through the pipe.
360 static int prepare_capture_sync_urb(struct snd_usb_substream
*subs
,
361 struct snd_pcm_runtime
*runtime
,
364 unsigned char *cp
= urb
->transfer_buffer
;
365 struct snd_urb_ctx
*ctx
= urb
->context
;
367 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
368 urb
->iso_frame_desc
[0].length
= 3;
369 urb
->iso_frame_desc
[0].offset
= 0;
370 cp
[0] = subs
->freqn
>> 2;
371 cp
[1] = subs
->freqn
>> 10;
372 cp
[2] = subs
->freqn
>> 18;
377 * prepare urb for high speed capture sync pipe
379 * fill the length and offset of each urb descriptor.
380 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
382 static int prepare_capture_sync_urb_hs(struct snd_usb_substream
*subs
,
383 struct snd_pcm_runtime
*runtime
,
386 unsigned char *cp
= urb
->transfer_buffer
;
387 struct snd_urb_ctx
*ctx
= urb
->context
;
389 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
390 urb
->iso_frame_desc
[0].length
= 4;
391 urb
->iso_frame_desc
[0].offset
= 0;
393 cp
[1] = subs
->freqn
>> 8;
394 cp
[2] = subs
->freqn
>> 16;
395 cp
[3] = subs
->freqn
>> 24;
/*
 * process after capture sync complete
 *
 * Nothing to do: the device-provided rate feedback is not used on the
 * capture sync pipe.
 */
static int retire_capture_sync_urb(struct snd_usb_substream *subs,
				   struct snd_pcm_runtime *runtime,
				   struct urb *urb)
{
	return 0;
}
411 * prepare urb for capture data pipe
413 * fill the offset and length of each descriptor.
415 * we use a temporary buffer to write the captured data.
416 * since the length of written data is determined by host, we cannot
417 * write onto the pcm buffer directly... the data is thus copied
418 * later at complete callback to the global buffer.
420 static int prepare_capture_urb(struct snd_usb_substream
*subs
,
421 struct snd_pcm_runtime
*runtime
,
425 struct snd_urb_ctx
*ctx
= urb
->context
;
428 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
429 for (i
= 0; i
< ctx
->packets
; i
++) {
430 urb
->iso_frame_desc
[i
].offset
= offs
;
431 urb
->iso_frame_desc
[i
].length
= subs
->curpacksize
;
432 offs
+= subs
->curpacksize
;
434 urb
->transfer_buffer_length
= offs
;
435 urb
->number_of_packets
= ctx
->packets
;
440 * process after capture complete
442 * copy the data from each desctiptor to the pcm buffer, and
443 * update the current position.
445 static int retire_capture_urb(struct snd_usb_substream
*subs
,
446 struct snd_pcm_runtime
*runtime
,
452 unsigned int stride
, frames
, bytes
, oldptr
;
453 int period_elapsed
= 0;
455 stride
= runtime
->frame_bits
>> 3;
457 for (i
= 0; i
< urb
->number_of_packets
; i
++) {
458 cp
= (unsigned char *)urb
->transfer_buffer
+ urb
->iso_frame_desc
[i
].offset
;
459 if (urb
->iso_frame_desc
[i
].status
) {
460 snd_printd(KERN_ERR
"frame %d active: %d\n", i
, urb
->iso_frame_desc
[i
].status
);
463 bytes
= urb
->iso_frame_desc
[i
].actual_length
;
464 frames
= bytes
/ stride
;
465 if (!subs
->txfr_quirk
)
466 bytes
= frames
* stride
;
467 if (bytes
% (runtime
->sample_bits
>> 3) != 0) {
468 #ifdef CONFIG_SND_DEBUG_VERBOSE
469 int oldbytes
= bytes
;
471 bytes
= frames
* stride
;
472 snd_printdd(KERN_ERR
"Corrected urb data len. %d->%d\n",
475 /* update the current pointer */
476 spin_lock_irqsave(&subs
->lock
, flags
);
477 oldptr
= subs
->hwptr_done
;
478 subs
->hwptr_done
+= bytes
;
479 if (subs
->hwptr_done
>= runtime
->buffer_size
* stride
)
480 subs
->hwptr_done
-= runtime
->buffer_size
* stride
;
481 frames
= (bytes
+ (oldptr
% stride
)) / stride
;
482 subs
->transfer_done
+= frames
;
483 if (subs
->transfer_done
>= runtime
->period_size
) {
484 subs
->transfer_done
-= runtime
->period_size
;
487 spin_unlock_irqrestore(&subs
->lock
, flags
);
488 /* copy a data chunk */
489 if (oldptr
+ bytes
> runtime
->buffer_size
* stride
) {
490 unsigned int bytes1
=
491 runtime
->buffer_size
* stride
- oldptr
;
492 memcpy(runtime
->dma_area
+ oldptr
, cp
, bytes1
);
493 memcpy(runtime
->dma_area
, cp
+ bytes1
, bytes
- bytes1
);
495 memcpy(runtime
->dma_area
+ oldptr
, cp
, bytes
);
499 snd_pcm_period_elapsed(subs
->pcm_substream
);
/*
 * Process after capture complete when paused.  Nothing to do.
 */
static int retire_paused_capture_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	return 0;
}
515 * prepare urb for full speed playback sync pipe
517 * set up the offset and length to receive the current frequency.
520 static int prepare_playback_sync_urb(struct snd_usb_substream
*subs
,
521 struct snd_pcm_runtime
*runtime
,
524 struct snd_urb_ctx
*ctx
= urb
->context
;
526 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
527 urb
->iso_frame_desc
[0].length
= 3;
528 urb
->iso_frame_desc
[0].offset
= 0;
533 * prepare urb for high speed playback sync pipe
535 * set up the offset and length to receive the current frequency.
538 static int prepare_playback_sync_urb_hs(struct snd_usb_substream
*subs
,
539 struct snd_pcm_runtime
*runtime
,
542 struct snd_urb_ctx
*ctx
= urb
->context
;
544 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
545 urb
->iso_frame_desc
[0].length
= 4;
546 urb
->iso_frame_desc
[0].offset
= 0;
551 * process after full speed playback sync complete
553 * retrieve the current 10.14 frequency from pipe, and set it.
554 * the value is referred in prepare_playback_urb().
556 static int retire_playback_sync_urb(struct snd_usb_substream
*subs
,
557 struct snd_pcm_runtime
*runtime
,
563 if (urb
->iso_frame_desc
[0].status
== 0 &&
564 urb
->iso_frame_desc
[0].actual_length
== 3) {
565 f
= combine_triple((u8
*)urb
->transfer_buffer
) << 2;
566 if (f
>= subs
->freqn
- subs
->freqn
/ 8 && f
<= subs
->freqmax
) {
567 spin_lock_irqsave(&subs
->lock
, flags
);
569 spin_unlock_irqrestore(&subs
->lock
, flags
);
577 * process after high speed playback sync complete
579 * retrieve the current 12.13 frequency from pipe, and set it.
580 * the value is referred in prepare_playback_urb().
582 static int retire_playback_sync_urb_hs(struct snd_usb_substream
*subs
,
583 struct snd_pcm_runtime
*runtime
,
589 if (urb
->iso_frame_desc
[0].status
== 0 &&
590 urb
->iso_frame_desc
[0].actual_length
== 4) {
591 f
= combine_quad((u8
*)urb
->transfer_buffer
) & 0x0fffffff;
592 if (f
>= subs
->freqn
- subs
->freqn
/ 8 && f
<= subs
->freqmax
) {
593 spin_lock_irqsave(&subs
->lock
, flags
);
595 spin_unlock_irqrestore(&subs
->lock
, flags
);
603 * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
605 * These devices return the number of samples per packet instead of the number
606 * of samples per microframe.
608 static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream
*subs
,
609 struct snd_pcm_runtime
*runtime
,
615 if (urb
->iso_frame_desc
[0].status
== 0 &&
616 urb
->iso_frame_desc
[0].actual_length
== 4) {
617 f
= combine_quad((u8
*)urb
->transfer_buffer
) & 0x0fffffff;
618 f
>>= subs
->datainterval
;
619 if (f
>= subs
->freqn
- subs
->freqn
/ 8 && f
<= subs
->freqmax
) {
620 spin_lock_irqsave(&subs
->lock
, flags
);
622 spin_unlock_irqrestore(&subs
->lock
, flags
);
629 /* determine the number of frames in the next packet */
630 static int snd_usb_audio_next_packet_size(struct snd_usb_substream
*subs
)
633 return subs
->maxframesize
;
635 subs
->phase
= (subs
->phase
& 0xffff)
636 + (subs
->freqm
<< subs
->datainterval
);
637 return min(subs
->phase
>> 16, subs
->maxframesize
);
642 * Prepare urb for streaming before playback starts or when paused.
644 * We don't have any data, so we send silence.
646 static int prepare_nodata_playback_urb(struct snd_usb_substream
*subs
,
647 struct snd_pcm_runtime
*runtime
,
650 unsigned int i
, offs
, counts
;
651 struct snd_urb_ctx
*ctx
= urb
->context
;
652 int stride
= runtime
->frame_bits
>> 3;
655 urb
->dev
= ctx
->subs
->dev
;
656 for (i
= 0; i
< ctx
->packets
; ++i
) {
657 counts
= snd_usb_audio_next_packet_size(subs
);
658 urb
->iso_frame_desc
[i
].offset
= offs
* stride
;
659 urb
->iso_frame_desc
[i
].length
= counts
* stride
;
662 urb
->number_of_packets
= ctx
->packets
;
663 urb
->transfer_buffer_length
= offs
* stride
;
664 memset(urb
->transfer_buffer
,
665 runtime
->format
== SNDRV_PCM_FORMAT_U8
? 0x80 : 0,
671 * prepare urb for playback data pipe
673 * Since a URB can handle only a single linear buffer, we must use double
674 * buffering when the data to be transferred overflows the buffer boundary.
675 * To avoid inconsistencies when updating hwptr_done, we use double buffering
678 static int prepare_playback_urb(struct snd_usb_substream
*subs
,
679 struct snd_pcm_runtime
*runtime
,
683 unsigned int counts
, frames
, bytes
;
685 int period_elapsed
= 0;
686 struct snd_urb_ctx
*ctx
= urb
->context
;
688 stride
= runtime
->frame_bits
>> 3;
691 urb
->dev
= ctx
->subs
->dev
; /* we need to set this at each time */
692 urb
->number_of_packets
= 0;
693 spin_lock_irqsave(&subs
->lock
, flags
);
694 for (i
= 0; i
< ctx
->packets
; i
++) {
695 counts
= snd_usb_audio_next_packet_size(subs
);
696 /* set up descriptor */
697 urb
->iso_frame_desc
[i
].offset
= frames
* stride
;
698 urb
->iso_frame_desc
[i
].length
= counts
* stride
;
700 urb
->number_of_packets
++;
701 subs
->transfer_done
+= counts
;
702 if (subs
->transfer_done
>= runtime
->period_size
) {
703 subs
->transfer_done
-= runtime
->period_size
;
705 if (subs
->fmt_type
== UAC_FORMAT_TYPE_II
) {
706 if (subs
->transfer_done
> 0) {
707 /* FIXME: fill-max mode is not
709 frames
-= subs
->transfer_done
;
710 counts
-= subs
->transfer_done
;
711 urb
->iso_frame_desc
[i
].length
=
713 subs
->transfer_done
= 0;
716 if (i
< ctx
->packets
) {
717 /* add a transfer delimiter */
718 urb
->iso_frame_desc
[i
].offset
=
720 urb
->iso_frame_desc
[i
].length
= 0;
721 urb
->number_of_packets
++;
726 if (period_elapsed
) /* finish at the period boundary */
729 bytes
= frames
* stride
;
730 if (subs
->hwptr_done
+ bytes
> runtime
->buffer_size
* stride
) {
731 /* err, the transferred area goes over buffer boundary. */
732 unsigned int bytes1
=
733 runtime
->buffer_size
* stride
- subs
->hwptr_done
;
734 memcpy(urb
->transfer_buffer
,
735 runtime
->dma_area
+ subs
->hwptr_done
, bytes1
);
736 memcpy(urb
->transfer_buffer
+ bytes1
,
737 runtime
->dma_area
, bytes
- bytes1
);
739 memcpy(urb
->transfer_buffer
,
740 runtime
->dma_area
+ subs
->hwptr_done
, bytes
);
742 subs
->hwptr_done
+= bytes
;
743 if (subs
->hwptr_done
>= runtime
->buffer_size
* stride
)
744 subs
->hwptr_done
-= runtime
->buffer_size
* stride
;
745 runtime
->delay
+= frames
;
746 spin_unlock_irqrestore(&subs
->lock
, flags
);
747 urb
->transfer_buffer_length
= bytes
;
749 snd_pcm_period_elapsed(subs
->pcm_substream
);
754 * process after playback data complete
755 * - decrease the delay count again
757 static int retire_playback_urb(struct snd_usb_substream
*subs
,
758 struct snd_pcm_runtime
*runtime
,
762 int stride
= runtime
->frame_bits
>> 3;
763 int processed
= urb
->transfer_buffer_length
/ stride
;
765 spin_lock_irqsave(&subs
->lock
, flags
);
766 if (processed
> runtime
->delay
)
769 runtime
->delay
-= processed
;
770 spin_unlock_irqrestore(&subs
->lock
, flags
);
774 static const char *usb_error_string(int err
)
780 return "endpoint not enabled";
782 return "endpoint stalled";
784 return "not enough bandwidth";
786 return "device disabled";
788 return "device suspended";
793 return "internal error";
795 return "unknown error";
800 * set up and start data/sync urbs
802 static int start_urbs(struct snd_usb_substream
*subs
, struct snd_pcm_runtime
*runtime
)
807 if (subs
->stream
->chip
->shutdown
)
810 for (i
= 0; i
< subs
->nurbs
; i
++) {
811 if (snd_BUG_ON(!subs
->dataurb
[i
].urb
))
813 if (subs
->ops
.prepare(subs
, runtime
, subs
->dataurb
[i
].urb
) < 0) {
814 snd_printk(KERN_ERR
"cannot prepare datapipe for urb %d\n", i
);
818 if (subs
->syncpipe
) {
819 for (i
= 0; i
< SYNC_URBS
; i
++) {
820 if (snd_BUG_ON(!subs
->syncurb
[i
].urb
))
822 if (subs
->ops
.prepare_sync(subs
, runtime
, subs
->syncurb
[i
].urb
) < 0) {
823 snd_printk(KERN_ERR
"cannot prepare syncpipe for urb %d\n", i
);
829 subs
->active_mask
= 0;
830 subs
->unlink_mask
= 0;
832 for (i
= 0; i
< subs
->nurbs
; i
++) {
833 err
= usb_submit_urb(subs
->dataurb
[i
].urb
, GFP_ATOMIC
);
835 snd_printk(KERN_ERR
"cannot submit datapipe "
836 "for urb %d, error %d: %s\n",
837 i
, err
, usb_error_string(err
));
840 set_bit(i
, &subs
->active_mask
);
842 if (subs
->syncpipe
) {
843 for (i
= 0; i
< SYNC_URBS
; i
++) {
844 err
= usb_submit_urb(subs
->syncurb
[i
].urb
, GFP_ATOMIC
);
846 snd_printk(KERN_ERR
"cannot submit syncpipe "
847 "for urb %d, error %d: %s\n",
848 i
, err
, usb_error_string(err
));
851 set_bit(i
+ 16, &subs
->active_mask
);
857 // snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
858 deactivate_urbs(subs
, 0, 0);
865 static struct snd_urb_ops audio_urb_ops
[2] = {
867 .prepare
= prepare_nodata_playback_urb
,
868 .retire
= retire_playback_urb
,
869 .prepare_sync
= prepare_playback_sync_urb
,
870 .retire_sync
= retire_playback_sync_urb
,
873 .prepare
= prepare_capture_urb
,
874 .retire
= retire_capture_urb
,
875 .prepare_sync
= prepare_capture_sync_urb
,
876 .retire_sync
= retire_capture_sync_urb
,
880 static struct snd_urb_ops audio_urb_ops_high_speed
[2] = {
882 .prepare
= prepare_nodata_playback_urb
,
883 .retire
= retire_playback_urb
,
884 .prepare_sync
= prepare_playback_sync_urb_hs
,
885 .retire_sync
= retire_playback_sync_urb_hs
,
888 .prepare
= prepare_capture_urb
,
889 .retire
= retire_capture_urb
,
890 .prepare_sync
= prepare_capture_sync_urb_hs
,
891 .retire_sync
= retire_capture_sync_urb
,
896 * initialize the substream instance.
899 void snd_usb_init_substream(struct snd_usb_stream
*as
,
900 int stream
, struct audioformat
*fp
)
902 struct snd_usb_substream
*subs
= &as
->substream
[stream
];
904 INIT_LIST_HEAD(&subs
->fmt_list
);
905 spin_lock_init(&subs
->lock
);
908 subs
->direction
= stream
;
909 subs
->dev
= as
->chip
->dev
;
910 subs
->txfr_quirk
= as
->chip
->txfr_quirk
;
911 if (snd_usb_get_speed(subs
->dev
) == USB_SPEED_FULL
) {
912 subs
->ops
= audio_urb_ops
[stream
];
914 subs
->ops
= audio_urb_ops_high_speed
[stream
];
915 switch (as
->chip
->usb_id
) {
916 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
917 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
918 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
919 subs
->ops
.retire_sync
= retire_playback_sync_urb_hs_emu
;
924 snd_usb_set_pcm_ops(as
->pcm
, stream
);
926 list_add_tail(&fp
->list
, &subs
->fmt_list
);
927 subs
->formats
|= fp
->formats
;
928 subs
->endpoint
= fp
->endpoint
;
930 subs
->fmt_type
= fp
->fmt_type
;
933 int snd_usb_substream_playback_trigger(struct snd_pcm_substream
*substream
, int cmd
)
935 struct snd_usb_substream
*subs
= substream
->runtime
->private_data
;
938 case SNDRV_PCM_TRIGGER_START
:
939 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE
:
940 subs
->ops
.prepare
= prepare_playback_urb
;
942 case SNDRV_PCM_TRIGGER_STOP
:
943 return deactivate_urbs(subs
, 0, 0);
944 case SNDRV_PCM_TRIGGER_PAUSE_PUSH
:
945 subs
->ops
.prepare
= prepare_nodata_playback_urb
;
952 int snd_usb_substream_capture_trigger(struct snd_pcm_substream
*substream
, int cmd
)
954 struct snd_usb_substream
*subs
= substream
->runtime
->private_data
;
957 case SNDRV_PCM_TRIGGER_START
:
958 subs
->ops
.retire
= retire_capture_urb
;
959 return start_urbs(subs
, substream
->runtime
);
960 case SNDRV_PCM_TRIGGER_STOP
:
961 return deactivate_urbs(subs
, 0, 0);
962 case SNDRV_PCM_TRIGGER_PAUSE_PUSH
:
963 subs
->ops
.retire
= retire_paused_capture_urb
;
965 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE
:
966 subs
->ops
.retire
= retire_capture_urb
;
973 int snd_usb_substream_prepare(struct snd_usb_substream
*subs
,
974 struct snd_pcm_runtime
*runtime
)
976 /* clear urbs (to be sure) */
977 deactivate_urbs(subs
, 0, 1);
978 wait_clear_urbs(subs
);
980 /* for playback, submit the URBs now; otherwise, the first hwptr_done
981 * updates for all URBs would happen at the same time when starting */
982 if (subs
->direction
== SNDRV_PCM_STREAM_PLAYBACK
) {
983 subs
->ops
.prepare
= prepare_nodata_playback_urb
;
984 return start_urbs(subs
, runtime
);