// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
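// For reference: 0x2e00 is 11776 ticks. At TICKS_PER_CYCLE * CYCLES_PER_SECOND
// = 24576000 ticks per second, 11776 / 24576000 s is about 479.17 usec, i.e.
// slightly less than four isochronous cycles of 125 usec each.
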
/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

// Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)
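
/*
 * For reference, the two-quadlet CIP header layout implied by the masks above
 * (bit 31 leftmost; a sketch based on IEC 61883-1, not normative here):
 *
 *   quadlet 0: [0 0][ SID:6 ][ DBS:8 ][FN:2][QPC:3][SPH:1][rsv:2][ DBC:8 ]
 *   quadlet 1: [1 0][ FMT:6 ][ FDF:8 ][              SYT:16             ]
 *
 * FN and QPC are not used by this module, so no masks are defined for them.
 */
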
/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add two quadlets CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0	// Nothing.

// The initial firmware of OXFW970 can postpone packet transmission while it finishes an
// asynchronous transaction. This module tolerates up to 5 skipped cycles to avoid buffer
// overrun; if the device skips more than that, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;

	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating much pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// Linux driver for 1394 OHCI controller voluntarily flushes isoc
	// context when total size of accumulated context header reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of context header in IR context is used for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
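
	// For illustration (assuming a 4 KiB PAGE_SIZE): with the 16-byte IR
	// context header including CIP, one page holds 4096 / 16 = 256 headers,
	// so maximum_usec_per_period = 1000000 * 4096 / 8000 / 16 = 32000 usec,
	// i.e. 256 isochronous cycles of 125 usec each.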

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double of the value of syt interval, thus it is
	// 250 usec.
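	// For example: double the syt interval corresponds to two isochronous
	// cycles, and two cycles take 2 * 125 usec = 250 usec, which is the
	// lower bound passed to snd_pcm_hw_constraint_minmax() below.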
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP
 *			  events.
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	s->pcm_frame_multiplier = pcm_frame_multiplier;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

// The CIP header is processed in context header apart from context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
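
// For illustration (hypothetical stream): at 48000 Hz the syt interval is 8,
// so a stream with 10 quadlets per data block yields 8 * 10 * 4 = 320 bytes of
// context payload per packet, or 5 * 320 = 1600 bytes when CIP_JUMBO_PAYLOAD
// is set.
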
/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

#define prev_packet_desc(s, desc) \
	list_prev_entry_circular(desc, &s->packet_descs_list, link)

static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      unsigned int size, unsigned int pos, unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		pos = (pos + 1) % size;
	}
}

static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       unsigned int size, unsigned int pos,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.data_block_state = state;
}
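
// A worked check for the CIP_SFC_44100 branch above: phases 0..79 produce 41
// packets of 6 events and 39 packets of 5 events, i.e. 41 * 6 + 39 * 5 = 441
// data blocks per 80 cycles -- exactly 44100 / 8000 = 5.5125 events per cycle
// on average.
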
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
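
// Worked example for the 1386.23 figure above: at 44.1 kHz the syt interval is
// 8, so successive timestamps are 8 * 24576000 / 44100 ~= 4458.23 ticks apart;
// modulo TICKS_PER_CYCLE (3072) the difference is 4458.23 - 3072 ~= 1386.23.
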
static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   unsigned int size, unsigned int pos, unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}

static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract transfer delay so that the synchronization offset is not so large
	// at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}
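
// For illustration (hypothetical values): with cycle = 8002 (cycle_lo = 2) and
// syt = 0x5123 (syt_cycle_lo = 5, intra-cycle offset 0x123 = 291 ticks), the
// event lies 3 cycles ahead, so syt_offset = 3 * 3072 + 291 = 9507 ticks
// before the transfer delay is subtracted.
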
// Both the producer and the consumer of the queue run in the same clock of the IEEE 1394 bus.
// Additionally, the sequence of tx packets is severely checked against any discontinuity
// before filling entries in the queue. The calculation is safe even if it looks fragile by
// overrun.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.pos;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}

static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_pos = s->ctx_data.tx.cache.pos;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_pos;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_pos = (cache_pos + 1) % cache_size;
		src = amdtp_stream_next_packet_desc(s, src);
	}

	s->ctx_data.tx.cache.pos = cache_pos;
}

static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
				 unsigned int pos, unsigned int count)
{
	pool_ideal_syt_offsets(s, descs, size, pos, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, size, pos, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}

static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			      unsigned int pos, unsigned int count)
{
	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
	const unsigned int cache_size = target->ctx_data.tx.cache.size;
	unsigned int cache_pos = s->ctx_data.rx.cache_pos;
	int i;

	for (i = 0; i < count; ++i) {
		descs[pos] = cache[cache_pos];
		cache_pos = (cache_pos + 1) % cache_size;
		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.cache_pos = cache_pos;
}

static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			   unsigned int pos, unsigned int count)
{
	struct amdtp_domain *d = s->domain;
	void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			       unsigned int pos, unsigned int count);

	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_seq_descs = pool_ideal_seq_descs;
	} else {
		if (!d->replay.on_the_fly) {
			pool_seq_descs = pool_replayed_seq;
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
			const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);

			if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_seq_descs = pool_replayed_seq;
			else
				pool_seq_descs = pool_ideal_seq_descs;
		}
	}

	pool_seq_descs(s, descs, size, pos, count);
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;

		// The program in user process should periodically check the status of intermediate
		// buffer associated to PCM substream to process PCM frames in the buffer, instead
		// of receiving notification of period elapsed by poll wait.
		if (!pcm->runtime->no_period_wakeup) {
			if (in_softirq()) {
				// In software IRQ context for 1394 OHCI.
				snd_pcm_period_elapsed(pcm);
			} else {
				// In process context of ALSA PCM application under acquired lock of
				// PCM substream.
				snd_pcm_period_elapsed_under_stream_lock(pcm);
			}
		}
	}
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err = 0;

	params->interrupt = sched_irq;
	params->tag = s->tag;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index, u32 curr_cycle_time)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index, curr_cycle_time);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index,
			       u32 curr_cycle_time)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle so that empty packet arrives.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index, curr_cycle_time);

	return 0;
}

// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}
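
// For illustration: tstamp 0x3456 has second field (0x3456 >> 13) & 0x07 = 1
// and cycle field 0x3456 & 0x1fff = 5206, so the cycle count is
// 1 * 8000 + 5206 = 13206.
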
static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return compute_ohci_iso_ctx_cycle_count(tstamp);
}

static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}

static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
{
	if (minuend < subtrahend)
		minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return minuend - subtrahend;
}

static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}

// Align to actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous packets as the size of the queue to skip
// isochronous cycles; therefore it's OK to just increment the cycle by the size of the queue
// for the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
	return increment_ohci_cycle_count(cycle, queue_size);
}

static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				    const __be32 *ctx_header, unsigned int packet_count,
				    unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	u32 curr_cycle_time = 0;
	int i;
	int err;

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	*desc_count = 0;
	for (i = 0; i < packet_count; ++i) {
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					desc = amdtp_stream_next_packet_desc(s, desc);
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction. The sequence replay is impossible due
				// to the reason.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				// Lost only when the cycle is beyond the tolerated skip window.
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i, curr_cycle_time);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		desc = amdtp_stream_next_packet_desc(s, desc);
		++(*desc_count);

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}

static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				     const __be32 *ctx_header, unsigned int packet_count)
{
	struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int seq_pos = s->ctx_data.rx.seq.pos;
	unsigned int dbc = s->data_block_counter;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);

	for (i = 0; i < packet_count; ++i) {
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_pos;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_pos = (seq_pos + 1) % seq_size;
		desc = amdtp_stream_next_packet_desc(s, desc);

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.pos = seq_pos;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_softirq())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
						 const struct pkt_desc *desc, unsigned int count)
{
	unsigned int data_block_count = 0;
	u32 latest_cycle;
	u32 cycle_time;
	u32 curr_cycle;
	u32 cycle_gap;
	int i, err;

	if (count == 0)
		goto end;

	// Forward to the latest record.
	for (i = 0; i < count - 1; ++i)
		desc = amdtp_stream_next_packet_desc(s, desc);
	latest_cycle = desc->cycle;

	err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
	if (err < 0)
		goto end;

	// Compute cycle count with lower 3 bits of second field and cycle field like timestamp
	// format of 1394 OHCI isochronous context.
	curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since
		// it corresponds to arrived isochronous packet.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);

		// NOTE: estimate delay by recent history of arrived AMDTP packets. The estimated
		// value expectedly corresponds to a few packets (0-2) since the packet arrived at
		// the most recent isochronous cycle has been already processed.
		for (i = 0; i < cycle_gap; ++i) {
			desc = amdtp_stream_next_packet_desc(s, desc);
			data_block_count += desc->data_blocks;
		}
	} else {
		// NOTE: The AMDTP packet descriptor should be for the future isochronous cycle
		// since it was already scheduled.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);

		// NOTE: use history of scheduled packets.
		for (i = 0; i < cycle_gap; ++i) {
			data_block_count += desc->data_blocks;
			desc = prev_packet_desc(s, desc);
		}
	}
end:
	return data_block_count * s->pcm_frame_multiplier;
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *desc,
				 unsigned int count)
{
	struct snd_pcm_substream *pcm;
	int i;

	pcm = READ_ONCE(s->pcm);
	s->process_ctx_payloads(s, desc, count, pcm);

	if (pcm) {
		unsigned int data_block_count = 0;

		pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);

		for (i = 0; i < count; ++i) {
			data_block_count += desc->data_blocks;
			desc = amdtp_stream_next_packet_desc(s, desc);
		}

		update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
	}
}

static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int pkt_header_length;
	unsigned int packets;
	u32 curr_cycle_time;
	bool need_hw_irq;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_rx_packet_descs(s, desc, ctx_header, packets);

	process_ctx_payloads(s, desc, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	if (s == d->irq_target) {
		// At NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
		// the tasks of user process operating ALSA PCM character device by calling ioctl(2)
		// with some requests, instead of scheduled hardware IRQ of an IT context.
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
		need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
	} else {
		need_hw_irq = false;
	}

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	for (i = 0; i < packets; ++i) {
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i, curr_cycle_time);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = need_hw_irq;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}

		desc = amdtp_stream_next_packet_desc(s, desc);
	}

	s->ctx_data.rx.event_count = event_count;
	s->packet_descs_cursor = desc;
}

static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);

static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		if (d->replay.enable)
			s->ctx_data.rx.cache_pos = 0;

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}

static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int packet_count;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packet_count = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, desc, desc_count);

		if (d->replay.enable)
			cache_seq(s, desc, desc_count);

		for (i = 0; i < desc_count; ++i)
			desc = amdtp_stream_next_packet_desc(s, desc);
		s->packet_descs_cursor = desc;
	}

	for (i = 0; i < packet_count; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}

static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// NODATA packet can include any data blocks but they are
					// not available as event.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing content of packet in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}

static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count to begin processing content of packet in IT contexts. All of IT
	// contexts are expected to start and get callback when reaching here.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;
		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}

// This is executed one time. For in-stream, first packet has come. For out-stream, prepared to
// transmit first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval to queue packet during initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	struct pkt_desc *descs;
	int i, type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false therefore it's
			// possible to cache much unexpectedly.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.pos = 0;
			s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
						sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.pos = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	// NOTE: When operating without hardIRQ/softIRQ, applications tend to call ioctl requests
	// for the runtime of a PCM substream in intervals equivalent to the size of the PCM
	// buffer. That could take a round over the queue of AMDTP packet descriptors and lose
	// some history. To be safe, keep 8 more elements for the queue, equivalent to 1 msec.
	descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
	if (!descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_descs = descs;

	INIT_LIST_HEAD(&s->packet_descs_list);
	for (i = 0; i < s->queue_size + 8; ++i) {
		INIT_LIST_HEAD(&descs->link);
		list_add_tail(&descs->link, &s->packet_descs_list);
		++descs;
	}
	s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;

err_pkt_descs:
	kfree(s->packet_descs);
	s->packet_descs = NULL;
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// In software IRQ context, the call causes dead-lock to disable the tasklet
		// synchronously.
		if (!in_softirq())
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->packet_descs);
	s->packet_descs = NULL;

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;
	d->events_per_buffer = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

// Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
// is less than the number of rx streams, the first tx stream is selected.
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make association to replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}

				// No target is available to replay sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;

			++dst_index;
		}
	}

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for isoc context in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR
 *			 contexts.
 * @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in
 *		IT context.
 * @replay_on_the_fly: transfer rx packets according to nominal frequency, then begin to replay
 *		       according to arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is a case that AMDTP streams in domain run just for MIDI
	// substream. Use the number of events equivalent to 10 msec as
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);
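
	// For illustration (hypothetical rates): at 48000 Hz with the defaults
	// above, events_per_period = 48000 / 100 = 480, events_per_buffer =
	// 3 * 480 = 1440, and queue_size = DIV_ROUND_UP(8000 * 1440, 48000) =
	// 240 packets, i.e. 30 msec worth of isochronous cycles.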

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Starts immediately but actually DMA context starts several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);