	int midi_db_count;
	unsigned int midi_db_interval;
+
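+	// Points to the cache embedded in struct snd_motu (assigned in amdtp_motu_init()).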
+	struct amdtp_motu_cache *cache;
};
int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
}
}
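+// Parse the source packet header (SPH) of each data block and store the
+// timestamp of every event, as a tick offset from the start of the packet's
+// cycle, into the ring buffer.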
+static void cache_event_offsets(struct amdtp_motu_cache *cache, const __be32 *buf,
+				unsigned int data_blocks, unsigned int data_block_quadlets)
+{
+	unsigned int *event_offsets = cache->event_offsets;
+	const unsigned int cache_size = cache->size;
+	unsigned int cache_tail = cache->tail;
+	unsigned int base_tick = cache->tx_cycle_count * TICKS_PER_CYCLE;
+	int i;
+
+	for (i = 0; i < data_blocks; ++i) {
+		u32 sph = be32_to_cpu(*buf);
+		unsigned int tick;
+
+		tick = ((sph & CIP_SPH_CYCLE_MASK) >> CIP_SPH_CYCLE_SHIFT) * TICKS_PER_CYCLE +
+		       (sph & CIP_SPH_OFFSET_MASK);
+
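+		// The SPH timestamp wraps every second; unwrap it so that the
+		// subtraction below cannot underflow.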
+		if (tick < base_tick)
+			tick += TICKS_PER_SECOND;
+		event_offsets[cache_tail] = tick - base_tick;
+
+		cache_tail = (cache_tail + 1) % cache_size;
+		buf += data_block_quadlets;
+	}
+
+	cache->tail = cache_tail;
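+	// Advance to the cycle of the next packet; the counter wraps every second.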
+	cache->tx_cycle_count = (cache->tx_cycle_count + 1) % CYCLES_PER_SECOND;
+}
+
static unsigned int process_ir_ctx_payloads(struct amdtp_stream *s,
					    const struct pkt_desc *descs,
					    unsigned int packets,
	unsigned int pcm_frames = 0;
	int i;
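+	// UINT_MAX means the cache was just invalidated at the start of
+	// streaming; latch the cycle in which packet processing starts.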
+	if (p->cache->tx_cycle_count == UINT_MAX)
+		p->cache->tx_cycle_count = (s->domain->processing_cycle.tx_start % CYCLES_PER_SECOND);
+
	// For data block processing.
	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = descs + i;
		__be32 *buf = desc->ctx_payload;
		unsigned int data_blocks = desc->data_blocks;
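+		// Cache the offset of every event in this packet before the
+		// payload is read for PCM samples.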
+		cache_event_offsets(p->cache, buf, data_blocks, s->data_block_quadlets);
+
		if (pcm) {
			read_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
			pcm_frames += data_blocks;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
		    enum amdtp_stream_direction dir,
-		    const struct snd_motu_spec *spec)
+		    const struct snd_motu_spec *spec, struct amdtp_motu_cache *cache)
{
	amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
	int fmt = CIP_FMT_MOTU;
	unsigned int flags = CIP_BLOCKING | CIP_UNAWARE_SYT;
+	struct amdtp_motu *p;
	int err;
	if (dir == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.fdf = MOTU_FDF_AM824;
	}
+	p = s->protocol;
+	p->cache = cache;
+
	return 0;
}
		fw_iso_resources_free(&motu->tx_resources);
		fw_iso_resources_free(&motu->rx_resources);
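+		// Drop the cache from the previous configuration; it is
+		// reallocated below with the new size.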
+		kfree(motu->cache.event_offsets);
+		motu->cache.event_offsets = NULL;
+
		err = snd_motu_protocol_set_clock_rate(motu, rate);
		if (err < 0) {
			dev_err(&motu->unit->device,
			fw_iso_resources_free(&motu->rx_resources);
			return err;
		}
+
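+		// Scale the cache to the size of the PCM buffer.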
+		motu->cache.size = motu->tx_stream.syt_interval * frames_per_buffer;
+		motu->cache.event_offsets = kcalloc(motu->cache.size, sizeof(*motu->cache.event_offsets),
+						    GFP_KERNEL);
+		if (!motu->cache.event_offsets) {
+			fw_iso_resources_free(&motu->tx_resources);
+			fw_iso_resources_free(&motu->rx_resources);
+			return -ENOMEM;
+		}
	}

	return 0;
		if (err < 0)
			goto stop_streams;
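+		// Invalidate the cache; tx_cycle_count is latched again when
+		// the first tx packet is processed.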
+		motu->cache.tail = 0;
+		motu->cache.tx_cycle_count = UINT_MAX;
+
		err = amdtp_domain_start(&motu->domain, 0, false, false);
		if (err < 0)
			goto stop_streams;
		fw_iso_resources_free(&motu->tx_resources);
		fw_iso_resources_free(&motu->rx_resources);
+
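+		// Drop the cache together with the released isochronous resources.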
+		kfree(motu->cache.event_offsets);
+		motu->cache.event_offsets = NULL;
	}
}
	if (err < 0)
		return err;
-	err = amdtp_motu_init(s, motu->unit, dir, motu->spec);
+	err = amdtp_motu_init(s, motu->unit, dir, motu->spec, &motu->cache);
	if (err < 0)
		fw_iso_resources_destroy(resources);
	unsigned char pcm_chunks[3];
};
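+// Ring buffer of per-event offsets, in ticks, parsed from the source packet
+// header of tx packets.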
+struct amdtp_motu_cache {
+	unsigned int *event_offsets;
+	unsigned int size;
+	unsigned int tail;
+	unsigned int tx_cycle_count;
+};
+
struct snd_motu {
	struct snd_card *card;
	struct fw_unit *unit;
	wait_queue_head_t hwdep_wait;
	struct amdtp_domain domain;
+
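+	// Gathered from tx packets; shared with both AMDTP streams.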
+	struct amdtp_motu_cache cache;
};
enum snd_motu_spec_flags {
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
		    enum amdtp_stream_direction dir,
-		    const struct snd_motu_spec *spec);
+		    const struct snd_motu_spec *spec,
+		    struct amdtp_motu_cache *cache);
int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
			      unsigned int midi_ports,
			      struct snd_motu_packet_format *formats);