/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

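/*
 * The IEEE 1394 cycle timer runs at 24.576 MHz; one isochronous cycle is
 * 125 microseconds, i.e. 3072 ticks, and 8000 cycles elapse per second.
 */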
#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_CIP			1

/* common isochronous packet header parameters */
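/*
 * The two CIP quadlets used here carry SID/DBS/DBC (first quadlet) and
 * EOH/FMT/FDF/SYT (second quadlet); the remaining IEC 61883-1 fields
 * (FN, QPC, SPH) are not used by this module.
 */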
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

/* TODO: make these configurable */
#define INTERRUPT_INTERVAL	16
#define QUEUE_LENGTH		48
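/*
 * With one packet per 125 us isochronous cycle, INTERRUPT_INTERVAL packets
 * correspond to 2 ms between hardware interrupts, and QUEUE_LENGTH packets
 * to 6 ms of buffering in the DMA context.
 */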

#define IN_PACKET_HEADER_SIZE	4
#define OUT_PACKET_HEADER_SIZE	0

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_data_blocks: callback handler to process data blocks
 * @protocol_size: the size of protocol-specific data to be newly allocated
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_data_blocks_t process_data_blocks,
		      unsigned int protocol_size)
{
	if (process_data_blocks == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_data_blocks = process_data_blocks;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

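/*
 * Number of events (data blocks) covered by one SYT timestamp for each
 * sampling frequency code; in blocking mode a non-empty packet carries
 * exactly this many frames.
 */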
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]	 =  8,
	[CIP_SFC_44100]	 =  8,
	[CIP_SFC_48000]	 =  8,
	[CIP_SFC_88200]	 = 16,
	[CIP_SFC_96000]	 = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	int err;

	/*
	 * Currently firewire-lib processes 16 packets per software interrupt
	 * callback. This corresponds to 2 msec, but the actual interval
	 * between interrupts has jitter.
	 * Additionally, even if a constraint were added to fit the period
	 * size to 2 msec, the resulting frames per period would not
	 * correspond to exactly 2 msec for every sampling rate.
	 * Therefore the interval at which snd_pcm_period_elapsed() is called
	 * cannot be 2 msec; use 5 msec as a safe lower bound for the period
	 * time.
	 */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5000, UINT_MAX);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no further constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include several frames. In blocking mode, the
	 * number equals SYT_INTERVAL, i.e. 8, 16 or 32 depending on the
	 * sampling rate. For accurate period interrupts, it's preferable to
	 * align period/buffer sizes to the current SYT_INTERVAL.
	 *
	 * TODO: These constraints can be improved with proper rules.
	 * Currently apply the LCM of the SYT_INTERVALs (32).
	 */
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	/* default buffering in the device */
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
	if (s->flags & CIP_BLOCKING)
		/* additional buffering needed to adjust for no-data packets */
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;

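	/*
	 * 8 bytes of CIP header plus, at most, syt_interval data blocks of
	 * data_block_quadlets quadlets (4 bytes each). Devices with the
	 * CIP_JUMBO_PAYLOAD quirk may send up to 5 times as much.
	 */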
	return 8 + s->syt_interval * s->data_block_quadlets * 4 * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			/* Sample_rate / 8000 is an integer, precomputed. */
			data_blocks = s->data_block_state;
		} else {
			phase = s->data_block_state;

			/*
			 * This calculates the number of data blocks per
			 * packet so that
			 * 1) the overall rate is correct and exactly
			 *    synchronized to the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur
			 *    as early as possible in the sequence (to prevent
			 *    underruns of the device's buffer).
			 */
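			/*
			 * For example, at 44.1 kHz (an 80-cycle sequence) the
			 * pattern 6 6 5 6 5 ... sums to 441 data blocks per
			 * 80 cycles, i.e. 44100 frames per 8000 cycles; the
			 * 88.2/176.4 kHz patterns repeat every 40/20 cycles
			 * and also sum to 441.
			 */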
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->last_syt_offset + s->syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample
			 * is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between
			 * successive elements is about 1386.23. Rounding the
			 * results of this formula to the SYT precision results
			 * in a sequence of differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
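			/*
			 * At 44.1 kHz, for instance, SYT_INTERVAL = 8 samples
			 * take 8 * 24576000 / 44100 = 4458.23 ticks, which is
			 * one full cycle (3072 ticks) plus the 1386.23 ticks
			 * mentioned above.
			 */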
			phase = s->syt_offset_state;
			index = phase % 13;
			syt_offset = s->last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->syt_offset_state = phase;
		}
	} else
		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
	s->last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, unsigned int header_length,
			unsigned int payload_length)
{
	struct fw_iso_packet p = {0};
	int err = 0;

	if (IS_ERR(s->context))
		goto end;

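	/*
	 * Request a hardware interrupt for the last packet of each
	 * INTERRUPT_INTERVAL window, so that the completion callback runs
	 * once per INTERRUPT_INTERVAL queued packets.
	 */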
	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	p.tag = TAG_CIP;
	p.header_length = header_length;
	if (payload_length > 0)
		p.payload_length = payload_length;
	else
		p.skip = true;
	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   unsigned int payload_length)
{
	return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length);
}

static inline int queue_in_packet(struct amdtp_stream *s)
{
	return queue_packet(s, IN_PACKET_HEADER_SIZE,
			    amdtp_stream_get_max_payload(s));
}

static int handle_out_packet(struct amdtp_stream *s, unsigned int cycle,
			     unsigned int index)
{
	__be32 *buffer;
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int payload_length;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	buffer = s->buffer.packets[s->packet_index].buffer;
	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

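	/*
	 * Build the two CIP header quadlets: SID/DBS/DBC in the first,
	 * EOH/FMT/FDF/SYT in the second. The payload follows at buffer + 2.
	 */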
	buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));

	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;
	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;

	trace_out_packet(s, cycle, buffer, payload_length, index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}

static int handle_in_packet(struct amdtp_stream *s,
			    unsigned int payload_quadlets, unsigned int cycle,
			    unsigned int index)
{
	__be32 *buffer;
	u32 cip_header[2];
	unsigned int fmt, fdf, syt;
	unsigned int data_block_quadlets, data_block_counter, dbc_interval;
	unsigned int data_blocks;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;
	bool lost;

	buffer = s->buffer.packets[s->packet_index].buffer;
	cip_header[0] = be32_to_cpu(buffer[0]);
	cip_header[1] = be32_to_cpu(buffer[1]);

	trace_in_packet(s, cycle, cip_header, payload_quadlets, index);

	/*
	 * This module supports the 'Two-quadlet CIP header with SYT field'.
	 * For convenience, it also checks whether the FMT field matches the
	 * expected protocol (e.g. AM824).
	 */
	if (((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	    ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Check whether the protocol is valid or not. */
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_quadlets < 3 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		data_blocks = 0;
	} else {
		data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		data_blocks = (payload_quadlets - 2) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

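	/*
	 * Without CIP_DBC_IS_END_EVENT, the DBC of a packet is expected to
	 * equal the counter stored from the previous packet. With it, the
	 * DBC is expected to have advanced by the number of data blocks in
	 * this packet (or by tx_dbc_interval, if the device reports one).
	 */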
	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->tx_first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = data_block_counter != s->data_block_counter;
	} else {
		if (data_blocks > 0 && s->tx_dbc_interval > 0)
			dbc_interval = s->tx_dbc_interval;
		else
			dbc_interval = data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter = data_block_counter;
	else
		s->data_block_counter =
			(data_block_counter + data_blocks) & 0xff;
end:
	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}

/*
 * In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
 * second field. In the DMA descriptors of 1394 OHCI, only 3 bits are available
 * for it. Thus, via the Linux FireWire subsystem, we get only the low 3 bits
 * of the second field.
 */
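/* tstamp layout: bits 15..13 hold the second (mod 8), bits 12..0 the cycle. */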
static inline u32 compute_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}

static inline u32 decrement_cycle_count(u32 cycle, unsigned int subtrahend)
{
	if (cycle < subtrahend)
		cycle += 8 * CYCLES_PER_SECOND;
	return cycle - subtrahend;
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets = header_length / 4;
	u32 cycle;

	if (s->packet_index < 0)
		return;

	cycle = compute_cycle_count(tstamp);

	/* Align to actual cycle count for the last packet. */
	cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);

	for (i = 0; i < packets; ++i) {
		cycle = increment_cycle_count(cycle, 1);
		if (handle_out_packet(s, cycle, i) < 0) {
			s->packet_index = -1;
			amdtp_stream_pcm_abort(s);
			return;
		}
	}

	fw_iso_context_queue_flush(s->context);
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets;
	unsigned int payload_quadlets, max_payload_quadlets;
	__be32 *headers = header;
	u32 cycle;

	if (s->packet_index < 0)
		return;

	/* The number of packets in buffer */
	packets = header_length / IN_PACKET_HEADER_SIZE;

	cycle = compute_cycle_count(tstamp);

	/* Align to actual cycle count for the last packet. */
	cycle = decrement_cycle_count(cycle, packets);

	/* For buffer-over-run prevention. */
	max_payload_quadlets = amdtp_stream_get_max_payload(s) / 4;

	for (i = 0; i < packets; i++) {
		cycle = increment_cycle_count(cycle, 1);

		/* The number of quadlets in this packet */
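		/*
		 * The upper 16 bits of each receive-context header quadlet
		 * hold the ISO packet's data length in bytes
		 * (ISO_DATA_LENGTH_SHIFT).
		 */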
		payload_quadlets =
			(be32_to_cpu(headers[i]) >> ISO_DATA_LENGTH_SHIFT) / 4;
		if (payload_quadlets > max_payload_quadlets) {
			dev_err(&s->unit->device,
				"Detect jumbo payload: %02x %02x\n",
				payload_quadlets, max_payload_quadlets);
			break;
		}

		if (handle_in_packet(s, payload_quadlets, cycle, i) < 0)
			break;
	}

	/* A queueing error occurred or an invalid payload was detected. */
	if (i < packets) {
		s->packet_index = -1;
		amdtp_stream_pcm_abort(s);
		return;
	}

	fw_iso_context_queue_flush(s->context);
}

/* This is executed only one time. */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM)
		context->callback.sc = in_stream_callback;
	else
		context->callback.sc = out_stream_callback;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
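	/*
	 * For the 32/48/96/192 kHz family, 'data_block' is the constant
	 * rate / 8000 consumed by calculate_data_blocks() and 'syt_offset'
	 * the constant tick increment consumed by calculate_syt(); the
	 * 44.1 kHz family instead uses these fields as the initial phases of
	 * its data-block and syt-offset sequences.
	 */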
	unsigned int header_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM)
		s->data_block_counter = UINT_MAX;
	else
		s->data_block_counter = 0;
	s->data_block_state = initial_state[s->sfc].data_block;
	s->syt_offset_state = initial_state[s->sfc].syt_offset;
	s->last_syt_offset = TICKS_PER_CYCLE;

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		header_size = IN_PACKET_HEADER_SIZE;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		header_size = OUT_PACKET_HEADER_SIZE;
	}
	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      amdtp_stream_get_max_payload(s), dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

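	/*
	 * Pre-queue all QUEUE_LENGTH packets of the ring; queue_packet()
	 * wraps packet_index back to 0 once the whole ring has been queued,
	 * which terminates the loop below.
	 */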
	s->packet_index = 0;
	do {
		if (s->direction == AMDTP_IN_STREAM)
			err = queue_in_packet(s);
		else
			err = queue_out_packet(s, 0);
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if (s->flags & CIP_EMPTY_WITH_TAG0)
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);

/**
 * amdtp_stream_pcm_pointer - get the PCM buffer position
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
{
	/*
	 * This function is called in the software IRQ context of the
	 * period_tasklet, or in process context.
	 *
	 * When the software IRQ context was scheduled by the software IRQ
	 * context of the IR/IT contexts, queued packets have already been
	 * handled, so there is no need to flush the queue again.
	 *
	 * When process context reaches here, some packets may already be
	 * queued in the buffer. These packets should be handled immediately
	 * to keep better granularity of the PCM pointer.
	 *
	 * Later, the process context may schedule the software IRQ context
	 * of the period_tasklet; then there is no need to flush the queue,
	 * for the same reason as described for the IR/IT contexts.
	 */
	if (!in_interrupt() && amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return ACCESS_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	ACCESS_ONCE(s->source_node_id_field) =
		(fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
		CIP_SID_MASK;
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_stop);

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);