// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"
#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1
// Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)
/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add two quadlets CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.

// The initial firmware of OXFW970 can postpone transmission of packets while finishing an
// asynchronous transaction. This module accepts up to 5 skipped cycles to avoid buffer overrun.
// When an actual device skips more, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5
/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, a bitwise OR of cip_flags enumeration constants
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
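
/*
 * Worked example (illustrative only, not from the original source): if the
 * rate interval has been narrowed to 48000 Hz, the loop above picks
 * step = amdtp_syt_intervals[CIP_SFC_48000] = 8. A size interval of
 * [500, 1000] frames is then refined to [roundup(500, 8), rounddown(1000, 8)]
 * = [504, 1000], so period and buffer sizes land on multiples of the SYT
 * interval.
 */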
/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;

	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// Linux driver for 1394 OHCI controller voluntarily flushes isoc
	// context when total size of accumulated context header reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the value of context header size for IR context is used for
	// both contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double of the value of syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
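
/*
 * Worked example (illustrative only, not from the original source): on a
 * system with 4 KiB pages and a stream with CIP headers (so ctx_header_size
 * is 16 bytes), the upper bound above is
 * USEC_PER_SEC * 4096 / 8000 / 16 = 32000 usec; the accumulated context
 * headers fill a page, and force a flush, at the latest every 32 msec.
 */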
/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
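
/*
 * Worked example (illustrative only, not from the original source): at
 * 48000 Hz the base delay is 0x2e00 - 3072 = 8704 ticks. In blocking mode
 * the adjustment adds 24576000 * 8 / 48000 = 4096 ticks, for 12800 ticks in
 * total, which is roughly 520 usec at the 24.576 MHz tick rate.
 */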
// The CIP header is processed in context header apart from context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
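
/*
 * Worked example (illustrative only, not from the original source): a
 * 48000 Hz stream with 10 quadlets per data block and without
 * CIP_JUMBO_PAYLOAD yields 8 * 10 * sizeof(__be32) = 320 bytes of context
 * payload; with a two-quadlet CIP header the maximum packet payload is
 * 320 + 8 = 328 bytes.
 */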
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      const unsigned int seq_size, unsigned int seq_tail,
				      unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		seq_tail = (seq_tail + 1) % seq_size;
	}
}
static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       const unsigned int seq_size, unsigned int seq_tail,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^
							 (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		seq_tail = (seq_tail + 1) % seq_size;
	}

	s->ctx_data.rx.data_block_state = state;
}
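
/*
 * Worked example (illustrative only, not from the original source): for
 * CIP_SFC_44100 the phase counter wraps at 80 and the expression above emits
 * 6 6 5 6 5 ... for phases 0-39, then 6 5 6 5 ... for phases 40-79: 441 data
 * blocks per 80 packets, i.e. exactly 44100 events per 8000 isoc cycles, so
 * the nominal rate is kept without accumulating error.
 */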
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
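
/*
 * Worked example (illustrative only, not from the original source): the
 * increment is 1387 when index is 4, 8 or 12 within each run of 13 phases,
 * plus once more at phase 146. Over one full period of 147 phases that is
 * 147 * 1386 + 34 = 203776 ticks, exactly 147 times the ideal difference of
 * 1386.2312..., so the generated timestamps never drift from the ideal rate.
 */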
static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   const unsigned int seq_size, unsigned int seq_tail,
				   unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		seq_tail = (seq_tail + 1) % seq_size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract transfer delay so that the synchronization offset is not so large
	// at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}
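
/*
 * Worked example (illustrative only, not from the original source): with
 * syt = 0x3456, cycle_lo = 1 and transfer_delay = 8704 ticks,
 * syt_cycle_lo = 3 - 1 = 2 and syt_offset = 2 * 3072 + 0x456 = 7254 ticks.
 * Since 7254 < 8704, one SYT cycle modulus (16 * 3072 = 49152) is added
 * before the delay is subtracted, giving 7254 + 49152 - 8704 = 47702 ticks
 * relative to the received cycle.
 */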
// Both the producer and the consumer of the queue run on the same IEEE 1394 bus clock.
// Additionally, the sequence of tx packets is severely checked against any discontinuity
// before filling entries in the queue. The calculation is safe even though it looks
// fragile with respect to overflow.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.tail;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}
static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_tail = s->ctx_data.tx.cache.tail;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_tail;
		const struct pkt_desc *src = descs + i;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_tail = (cache_tail + 1) % cache_size;
	}

	s->ctx_data.tx.cache.tail = cache_tail;
}
static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
{
	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;

	pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);

	s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
}
static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
{
	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
	const unsigned int cache_size = target->ctx_data.tx.cache.size;
	unsigned int cache_head = s->ctx_data.rx.cache_head;
	struct seq_desc *descs = s->ctx_data.rx.seq.descs;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
	int i;

	for (i = 0; i < count; ++i) {
		descs[seq_tail] = cache[cache_head];
		seq_tail = (seq_tail + 1) % seq_size;
		cache_head = (cache_head + 1) % cache_size;
	}

	s->ctx_data.rx.seq.tail = seq_tail;
	s->ctx_data.rx.cache_head = cache_head;
}
static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
{
	struct amdtp_domain *d = s->domain;

	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_ideal_seq_descs(s, count);
	} else {
		if (!d->replay.on_the_fly) {
			pool_replayed_seq(s, count);
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
			const unsigned int cache_head = s->ctx_data.rx.cache_head;
			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);

			if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_replayed_seq(s, count);
			else
				pool_ideal_seq_descs(s, count);
		}
	}
}
static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;

		// The program in user process should periodically check the status of intermediate
		// buffer associated to PCM substream to process PCM frames in the buffer, instead
		// of receiving notification of period elapsed by poll wait.
		if (!pcm->runtime->no_period_wakeup) {
			if (in_softirq()) {
				// In software IRQ context for 1394 OHCI.
				snd_pcm_period_elapsed(pcm);
			} else {
				// In process context of ALSA PCM application under acquired lock of
				// PCM substream.
				snd_pcm_period_elapsed_under_stream_lock(pcm);
			}
		}
	}
}
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}
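
/*
 * Worked example (illustrative only, not from the original source): the two
 * quadlets built above carry, per the masks defined at the top of this file:
 *   quadlet 0: SID in bits 29-24, DBS in bits 23-16, SPH in bit 10, DBC in bits 7-0
 *   quadlet 1: EOH in bit 31, FMT in bits 29-24, FDF in bits 23-16, SYT in bits 15-0
 * e.g. an AM824 stream (fmt 0x10) from node 2 with 10 quadlets per data
 * block, dbc 0x40, fdf 0x00 and syt 0x1234 yields 0x020a0040:0x90001234.
 */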
static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index);
}
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle so that empty packet arrives.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index);

	return 0;
}
// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;

	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return cycle;
}

static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}
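
/*
 * Worked example (illustrative only, not from the original source): a context
 * header tstamp of 0x4123 decodes as second = (0x4123 >> 13) & 0x07 = 2 and
 * cycle = 0x4123 & 0x1fff = 291, so compute_ohci_cycle_count() returns
 * 2 * 8000 + 291 = 16291. The counter wraps at 8 * 8000 = 64000 cycles.
 */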
// Align to actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous packets as the queue size
// to skip isochronous cycles, therefore it's OK to just increment the cycle by
// the size of queue for the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);

	return increment_ohci_cycle_count(cycle, queue_size);
}
static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets,
				     unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	int i;
	int err;

	*desc_count = 0;
	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + *desc_count;
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					++desc;
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction. The sequence replay is impossible due
				// to the reason.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}
static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);

	return syt & CIP_SYT_MASK;
}
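
/*
 * Worked example (illustrative only, not from the original source): with
 * syt_offset = 4000 ticks, cycle = 100 and transfer_delay = 8704, the sum
 * 12704 splits into 4 cycles plus 416 ticks, so
 * syt = (((100 + 4) << 12) | 416) & 0xffff = 0x81a0: the low 4 bits of the
 * target cycle in bits 15-12 and the tick offset within that cycle in
 * bits 11-0.
 */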
static void generate_pkt_descs(struct amdtp_stream *s, const __be32 *ctx_header, unsigned int packets)
{
	struct pkt_desc *descs = s->pkt_descs;
	const struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_head = s->ctx_data.rx.seq.head;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_head;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_head = (seq_head + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.head = seq_head;
}
static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_softirq())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}
static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int pkt_header_length;
	unsigned int packets;
	bool need_hw_irq;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	pool_seq_descs(s, packets);

	generate_pkt_descs(s, ctx_header, packets);

	process_ctx_payloads(s, s->pkt_descs, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	if (s == d->irq_target) {
		// At NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
		// the tasks of user process operating ALSA PCM character device by calling ioctl(2)
		// with some requests, instead of scheduled hardware IRQ of an IT context.
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

		need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
	} else {
		need_hw_irq = false;
	}

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = need_hw_irq;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}
static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);

static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}
static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, s->pkt_descs, desc_count);

		if (d->replay.enable)
			cache_seq(s, s->pkt_descs, desc_count);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}
static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they are
					// not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing content of packet in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}
static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count to begin processing content of packet in IT contexts. All of IT
	// contexts are expected to start and get callback when reaching here.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}
// This is executed one time. For in-stream, the first packet has arrived. For out-stream, it is
// prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}
/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval to queue packet during initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false therefore it's
			// possible to cache much unexpectedly.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.tail = 0;
			s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
						sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.tail = 0;
		s->ctx_data.rx.seq.head = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// In software IRQ context, the call would cause a deadlock by
		// disabling the tasklet synchronously.
		if (!in_softirq())
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}
/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
// Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
// is less than the number of rx streams, the first tx stream is selected.
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make association to replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}
				// No target is available to replay sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;
			rx->ctx_data.rx.cache_head = 0;

			++dst_index;
		}
	}

	return 0;
}
/**
 * amdtp_domain_start - start sending packets for isoc context in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR
 *			 contexts.
 * @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in
 *		IT context.
 * @replay_on_the_fly: transfer rx packets according to nominal frequency, then begin to replay
 *		       according to arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is a case that AMDTP streams in domain run just for MIDI
	// substream. Use the number of events equivalent to 10 msec as
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Starts immediately but actually DMA context starts several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
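
/*
 * Worked example (illustrative only, not from the original source): for a
 * MIDI-only domain at 48000 Hz the defaults above become
 * events_per_period = 480 (10 msec) and events_per_buffer = 1440, hence
 * queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets and
 * idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000) = 80 cycles between
 * hardware IRQs.
 */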
/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);