/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from
 * 1. Threads.
 * 2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 * - Semaphores.
 * - state flags in the channel object; and
 * - ensuring the IO_Dispatch() routine, which is called from both
 *   CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of function sync_wait_on_event.
 * This invariant condition is:
 *
 *     list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
 * and
 *     !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
 */
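
/*
 * Illustrative sketch only (not compiled): how the invariant above is kept
 * whenever the completion queue is touched under chnl_mgr_lock, mirroring
 * what bridge_chnl_get_ioc() does further down in this file.
 *
 *	if (!list_empty(&pchnl->io_completions))
 *		sync_set_event(pchnl->sync_event);
 *	else
 *		sync_reset_event(pchnl->sync_event);
 */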

#include <linux/types.h>

/* ----------------------------------- OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include "_tiomap.h"

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- Define for This */
#define USERMODE_ADDR	PAGE_OFFSET

#define MAILBOX_IRQ	INT_MAIL_MPU_IRQ

/* ----------------------------------- Function Prototypes */
static int create_chirp_list(struct list_head *list, u32 chirps);

static void free_chirp_list(struct list_head *list);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl);

/*
 * ======== bridge_chnl_add_io_req ========
 * Enqueue an I/O request for data transfer on a channel to the DSP.
 * The direction (mode) is specified in the channel object. Note the DSP
 * address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			   u32 byte_size, u32 buf_size,
			   u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl)
		return -EFAULT;

	if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
		return -EPERM;

	/*
	 * Check the channel state: only queue chirp if channel state
	 * allows it.
	 */
	dw_state = pchnl->state;
	if (dw_state != CHNL_STATEREADY) {
		if (dw_state & CHNL_STATECANCEL)
			return -ECANCELED;
		if ((dw_state & CHNL_STATEEOS) &&
		    CHNL_IS_OUTPUT(pchnl->chnl_mode))
			return -EPIPE;
		/* No other possible states left */
		DBC_ASSERT(0);
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL)
			return -ENOMEM;

		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				return -EFAULT;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size > io_buf_size(
					pchnl->chnl_mgr_obj->iomgr)) {
				status = -EINVAL;
				goto out;
			}
		}
	}

	/* Get a free chirp: */
	if (list_empty(&pchnl->free_packets_list)) {
		status = -EIO;
		goto out;
	}
	chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	/* Enqueue the chirp on the chnl's IORequest queue: */
	chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		host_buf;
	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
		chnl_packet_obj->host_sys_buf = host_sys_buf;

	/*
	 * Note: for dma chans dw_dsp_addr contains dsp address
	 * of SM buffer.
	 */
	DBC_ASSERT(chnl_mgr_obj->word_size != 0);
	/* DSP address */
	chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
	chnl_packet_obj->byte_size = byte_size;
	chnl_packet_obj->buf_size = buf_size;
	/* Only valid for output channel */
	chnl_packet_obj->arg = dw_arg;
	chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
				   CHNL_IOCSTATCOMPLETE);
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
	pchnl->cio_reqs++;
	DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
	/*
	 * If end of stream, update the channel state to prevent
	 * more IOR's.
	 */
	if (is_eos)
		pchnl->state |= CHNL_STATEEOS;

	/* Legacy DSM Processor-Copy */
	DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
	/* Request IO from the DSP */
	io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
			(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
			 IO_OUTPUT), &mb_val);
	sched_dpc = true;
out:
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->iomgr);

	return status;
}
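
/*
 * Illustrative call sequence (hypothetical caller, not part of this
 * driver): queue a kernel buffer for transfer on an open channel, then
 * reclaim its completion record. The buffer and sizes are placeholders.
 *
 *	struct chnl_ioc ioc;
 *	int status;
 *
 *	status = bridge_chnl_add_io_req(chnl, buf, len, buf_size, 0, 0);
 *	if (!status)
 *		status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
 */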

/*
 * ======== bridge_chnl_cancel_io ========
 * Return all I/O requests to the client which have not yet been
 * transferred. The channel's I/O completion object is
 * signalled, and all the I/O requests are queued as IOC's, with the
 * status field set to CHNL_IOCSTATCANCEL.
 * This call is typically used in abort situations, and is a prelude to
 * chnl_close();
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chirp, *tmp;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (!pchnl || !pchnl->chnl_mgr_obj)
		return -EFAULT;

	chnl_id = pchnl->chnl_id;
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	/* Mark this channel as cancelled, to prevent further IORequests or
	 * dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	pchnl->state |= CHNL_STATECANCEL;

	if (list_empty(&pchnl->io_requests)) {
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		return 0;
	}

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
		list_del(&chirp->link);
		chirp->byte_size = 0;
		chirp->status |= CHNL_IOCSTATCANCEL;
		list_add_tail(&chirp->link, &pchnl->io_completions);
		pchnl->cio_cs++;
		pchnl->cio_reqs--;
		DBC_ASSERT(pchnl->cio_reqs >= 0);
	}

	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	return 0;
}

/*
 * ======== bridge_chnl_close ========
 * Purpose:
 *      Ensures all pending I/O on this channel is cancelled, discards all
 *      queued I/O completion notifications, then frees the resources
 *      allocated for this channel, and makes the corresponding logical
 *      channel id available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl)
		return -EFAULT;
	/* Cancel IO: this ensures no further IO requests or notifications */
	status = bridge_chnl_cancel_io(chnl_obj);
	if (status)
		return status;
	/* Assert I/O on this channel is now cancelled: Protects from io_dpc */
	DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
	/* Free the slot in the channel manager: */
	pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	pchnl->chnl_mgr_obj->open_channels -= 1;
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	/* Reset channel event: (NOTE: user_event freed in user context) */
	if (pchnl->sync_event) {
		sync_reset_event(pchnl->sync_event);
		kfree(pchnl->sync_event);
		pchnl->sync_event = NULL;
	}
	/* Free I/O request and I/O completion queues: */
	free_chirp_list(&pchnl->io_completions);
	pchnl->cio_cs = 0;

	free_chirp_list(&pchnl->io_requests);
	pchnl->cio_reqs = 0;

	free_chirp_list(&pchnl->free_packets_list);

	/* Release channel object. */
	kfree(pchnl);

	return status;
}

/*
 * ======== bridge_chnl_create ========
 * Create a channel manager object, responsible for opening new channels
 * and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
		       struct dev_object *hdev_obj,
		       const struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Check DBC requirements: */
	DBC_REQUIRE(channel_mgr != NULL);
	DBC_REQUIRE(mgr_attrts != NULL);
	DBC_REQUIRE(mgr_attrts->max_channels > 0);
	DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
	DBC_REQUIRE(mgr_attrts->word_size != 0);

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 *      mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 *      DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
						 * max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->channels) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->output_mask = 0;
			chnl_mgr_obj->last_output = 0;
			chnl_mgr_obj->dev_obj = hdev_obj;
			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (status) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}
	return status;
}
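
/*
 * Illustrative manager lifetime (hypothetical caller, values assumed):
 * create a channel manager for a device object, then destroy it. A DSP
 * word size of 2 bytes is only an example value.
 *
 *	struct chnl_mgr *mgr;
 *	struct chnl_mgrattrs attrs = {
 *		.max_channels = CHNL_MAXCHANNELS,
 *		.word_size = 2,
 *	};
 *	int status;
 *
 *	status = bridge_chnl_create(&mgr, hdev_obj, &attrs);
 *	...
 *	status = bridge_chnl_destroy(mgr);
 */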

/*
 * ======== bridge_chnl_destroy ========
 * Purpose:
 *      Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->channels
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->channels);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_flush_io ========
 * Purpose:
 *      Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (pchnl) {
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!list_empty(&pchnl->io_requests) && !status) {
				status = bridge_chnl_get_ioc(chnl_obj,
						timeout, &chnl_ioc_obj);
				if (status)
					continue;

				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;

			}
		} else {
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->state &= ~CHNL_STATECANCEL;
		}
	}
	DBC_ENSURE(status || list_empty(&pchnl->io_requests));
	return status;
}

/*
 * ======== bridge_chnl_get_info ========
 * Purpose:
 *      Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			 struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->state = pchnl->state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_get_ioc ========
 * Optionally wait for I/O completion on a channel. Dequeue an I/O
 * completion record, which contains information about the completed
 * I/O request.
 * Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (list_empty(&pchnl->io_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout != CHNL_IOCNOWAIT &&
	    list_empty(&pchnl->io_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (list_empty(&pchnl->io_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!list_empty(&pchnl->io_completions));
		chnl_packet_obj = list_first_entry(&pchnl->io_completions,
						   struct chnl_irp, link);
		list_del(&chnl_packet_obj->link);
		/* Update chan_ioc from channel state and chirp: */
		pchnl->cio_cs--;
		/*
		 * If this is a zero-copy channel, then set IOC's pbuf
		 * to the DSP's address. This DSP address will get
		 * translated to user's virtual addr later.
		 */
		host_sys_buf = chnl_packet_obj->host_sys_buf;
		ioc.buf = chnl_packet_obj->host_user_buf;
		ioc.byte_size = chnl_packet_obj->byte_size;
		ioc.buf_size = chnl_packet_obj->buf_size;
		ioc.arg = chnl_packet_obj->arg;
		ioc.status |= chnl_packet_obj->status;
		/* Place the used chirp on the free list: */
		list_add_tail(&chnl_packet_obj->link,
			      &pchnl->free_packets_list);
	} else {
		ioc.buf = NULL;
		ioc.byte_size = 0;
		ioc.arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!list_empty(&pchnl->io_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in User mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (timeout == CHNL_IOCNOWAIT) {
		 *	... ensure event is set ...
		 *	sync_set_event(pchnl->sync_event);
		 * } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.buf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.buf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}

/*
 * ======== bridge_chnl_get_mgr_info ========
 * Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
			     struct chnl_mgrinfo *mgr_info)
{
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (!mgr_info || !hchnl_mgr)
		return -EFAULT;

	if (ch_id > CHNL_MAXCHANNELS)
		return -ECHRNG;

	/* Return the requested information: */
	mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
	mgr_info->open_channels = chnl_mgr_obj->open_channels;
	mgr_info->type = chnl_mgr_obj->type;
	/* total # of chnls */
	mgr_info->max_channels = chnl_mgr_obj->max_channels;

	return 0;
}

/*
 * ======== bridge_chnl_idle ========
 *      Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
		     bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	DBC_REQUIRE(chnl_obj);

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->state &= ~CHNL_STATECANCEL;
	}

	return status;
}

/*
 * ======== bridge_chnl_open ========
 * Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
		     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
		     u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;

	/* Validate Args: */
	if (!pattrs->uio_reqs)
		return -EINVAL;

	if (!hchnl_mgr)
		return -EFAULT;

	if (ch_id != CHNL_PICKFREE) {
		if (ch_id >= chnl_mgr_obj->max_channels)
			return -ECHRNG;
		if (chnl_mgr_obj->channels[ch_id] != NULL)
			return -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &ch_id);
		if (status)
			return status;
	}

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);

	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl)
		return -ENOMEM;

	/* Protect queues from io_dpc: */
	pchnl->state = CHNL_STATECANCEL;

	/* Initialize the request/completion queues before any allocation can
	 * fail, so the error path below can safely free them. */
	INIT_LIST_HEAD(&pchnl->io_requests);
	INIT_LIST_HEAD(&pchnl->io_completions);

	/* Allocate initial IOR and IOC queues: */
	status = create_chirp_list(&pchnl->free_packets_list,
				   pattrs->uio_reqs);
	if (status)
		goto out_err;

	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;

	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(sync_event);

	pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!pchnl->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(pchnl->ntfy_obj);

	/* Initialize CHNL object fields: */
	pchnl->chnl_mgr_obj = chnl_mgr_obj;
	pchnl->chnl_id = ch_id;
	pchnl->chnl_mode = chnl_mode;
	pchnl->user_event = sync_event;
	pchnl->sync_event = sync_event;
	/* Get the process handle */
	pchnl->process = current->tgid;
	pchnl->cb_arg = 0;
	pchnl->bytes_moved = 0;
	/* Default to proc-copy */
	pchnl->chnl_type = CHNL_PCPY;

	/* Insert channel object in channel manager: */
	chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	chnl_mgr_obj->open_channels++;
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	/* Return result... */
	pchnl->state = CHNL_STATEREADY;
	*chnl = pchnl;

	return status;

out_err:
	/* Free memory */
	free_chirp_list(&pchnl->io_completions);
	free_chirp_list(&pchnl->io_requests);
	free_chirp_list(&pchnl->free_packets_list);

	kfree(sync_event);

	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	kfree(pchnl);

	return status;
}
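
/*
 * Illustrative open/close pairing (hypothetical caller): let the manager
 * pick any free channel id, open it, then close it again. The mode
 * constant and request depth below are assumptions, not taken from this
 * file.
 *
 *	struct chnl_object *chnl;
 *	struct chnl_attr attrs = { .uio_reqs = 16 };
 *	int status;
 *
 *	status = bridge_chnl_open(&chnl, hchnl_mgr, CHNL_MODETODSP,
 *				  CHNL_PICKFREE, &attrs);
 *	if (!status)
 *		status = bridge_chnl_close(chnl);
 */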

/*
 * ======== bridge_chnl_register_notify ========
 * Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				u32 event_mask, u32 notify_type,
				struct dsp_notification *hnotification)
{
	int status = 0;

	DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}
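
/*
 * Illustrative notification setup (hypothetical caller): request a
 * notification each time an I/O completes on the channel. The notify
 * type constant is an assumption from the wider dspbridge API, not
 * defined in this file.
 *
 *	struct dsp_notification notification;
 *	int status;
 *
 *	status = bridge_chnl_register_notify(chnl, DSP_STREAMIOCOMPLETION,
 *					     DSP_SIGNALEVENT, &notification);
 */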

/*
 * ======== create_chirp_list ========
 * Purpose:
 *      Initialize a queue of channel I/O Request/Completion packets.
 * Parameters:
 *      list:   Pointer to a list_head
 *      chirps: Number of Chirps to allocate.
 * Returns:
 *      0 if successful, error code otherwise.
 * Requires:
 * Ensures:
 */
static int create_chirp_list(struct list_head *list, u32 chirps)
{
	struct chnl_irp *chirp;
	u32 i;

	INIT_LIST_HEAD(list);

	/* Make N chirps and place on queue. */
	for (i = 0; i < chirps; i++) {
		chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
		if (!chirp)
			break;
		list_add_tail(&chirp->link, list);
	}

	/* If we couldn't allocate all chirps, free those allocated: */
	if (i != chirps) {
		free_chirp_list(list);
		return -ENOMEM;
	}

	return 0;
}

/*
 * ======== free_chirp_list ========
 * Purpose:
 *      Free the queue of Chirps.
 */
static void free_chirp_list(struct list_head *chirp_list)
{
	struct chnl_irp *chirp, *tmp;

	DBC_REQUIRE(chirp_list != NULL);

	list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
		list_del(&chirp->link);
		kfree(chirp);
	}
}

/*
 * ======== search_free_channel ========
 * Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	DBC_REQUIRE(chnl_mgr_obj);

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->channels[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}