2 * Broadcom BM2835 V4L2 driver
4 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
10 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
11 * Dave Stevenson <dsteve@broadcom.com>
12 * Simon Mellor <simellor@broadcom.com>
13 * Luke Diamand <luked@broadcom.com>
15 * V4L2 driver MMAL vchiq interface code
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/completion.h>
26 #include <linux/vmalloc.h>
27 #include <asm/cacheflush.h>
28 #include <media/videobuf2-vmalloc.h>
30 #include "mmal-common.h"
31 #include "mmal-vchiq.h"
35 #include "interface/vchi/vchi.h"
37 /* maximum number of components supported */
38 #define VCHIQ_MMAL_MAX_COMPONENTS 4
40 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for mmal message types, indexed directly by
 * (MSG)->h.type in DBG_DUMP_MSG below — order must match the message
 * type enum exactly.
 * NOTE(review): most entries of this table (and its closing brace) are
 * missing from this extract; do not treat it as complete.
 */
43 static const char *const msg_type_names
[] = {
61 "GET_CORE_STATS_FOR_PORT",
65 "OPAQUE_ALLOCATOR_DESC",
68 "BUFFER_FROM_HOST_ZEROLEN",
/* Human-readable names for port actions, indexed by
 * enum mmal_msg_port_action_type (see port_action_port()).
 * NOTE(review): all entries and the closing brace are missing from
 * this extract.
 */
74 static const char *const port_action_type_names
[] = {
/* Debug helper: dump a message's type, header bytes and payload bytes.
 * Three variants are selected at compile time: full hex dump when
 * FULL_MSG_DUMP is set, a type/length line otherwise, and a no-op when
 * debugging is disabled.
 * NOTE(review): the do{}while(0) wrappers, the #elif/#else/#endif lines
 * and some continuation lines are missing from this extract — the
 * macro bodies below are incomplete as shown.
 */
85 #if defined(FULL_MSG_DUMP)
86 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
88 pr_debug(TITLE" type:%s(%d) length:%d\n", \
89 msg_type_names[(MSG)->h.type], \
90 (MSG)->h.type, (MSG_LEN)); \
91 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
93 sizeof(struct mmal_msg_header), 1); \
94 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
96 ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
97 (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
100 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
102 pr_debug(TITLE" type:%s(%d) length:%d\n", \
103 msg_type_names[(MSG)->h.type], \
104 (MSG)->h.type, (MSG_LEN)); \
108 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
/* Per-message state, either for an asynchronous bulk buffer transfer
 * (u.bulk — dispatched to a workqueue via 'work', which must stay the
 * first member so buffer_work_cb() can cast work_struct* back to this
 * context) or for a synchronous request/reply (u.sync).
 * NOTE(review): this definition is incomplete in the extract — the
 * union/struct opening lines and several members referenced elsewhere
 * in this file (bulk.mmal_flags, bulk.dts, bulk.pts, sync.msg_len) are
 * missing; do not treat it as the full layout.
 */
111 /* normal message context */
112 struct mmal_msg_context
{
115 /* work struct for defered callback - must come first */
116 struct work_struct work
;
118 struct vchiq_mmal_instance
*instance
;
120 struct vchiq_mmal_port
*port
;
121 /* actual buffer used to store bulk reply */
122 struct mmal_buffer
*buffer
;
123 /* amount of buffer used */
124 unsigned long buffer_used
;
125 /* MMAL buffer flags */
127 /* Presentation and Decode timestamps */
131 int status
; /* context status */
133 } bulk
; /* bulk data */
136 /* message handle to release */
137 VCHI_HELD_MSG_T msg_handle
;
138 /* pointer to received message */
139 struct mmal_msg
*msg
;
140 /* received message length */
142 /* completion upon reply */
143 struct completion cmplt
;
144 } sync
; /* synchronous response */
/* Driver-side state for one open mmal service connection.
 * NOTE(review): members are missing from this extract — at least the
 * bulk_scratch page used by dummy_bulk_receive() and the closing brace
 * are not visible; do not treat this definition as complete.
 */
149 struct vchiq_mmal_instance
{
150 VCHI_SERVICE_HANDLE_T handle
;
152 /* ensure serialised access to service */
153 struct mutex vchiq_mutex
;
155 /* ensure serialised access to bulk operations */
156 struct mutex bulk_mutex
;
158 /* vmalloc page to receive scratch bulk xfers into */
161 /* component to use next */
163 struct vchiq_mmal_component component
[VCHIQ_MMAL_MAX_COMPONENTS
];
166 static struct mmal_msg_context
*get_msg_context(struct vchiq_mmal_instance
169 struct mmal_msg_context
*msg_context
;
171 /* todo: should this be allocated from a pool to avoid kmalloc */
172 msg_context
= kmalloc(sizeof(*msg_context
), GFP_KERNEL
);
173 memset(msg_context
, 0, sizeof(*msg_context
));
/* Free a message context obtained from get_msg_context().
 *
 * NOTE(review): only the signature was visible in the extract; the
 * kfree() body is reconstructed — it is the counterpart of the
 * kmalloc-family allocation in get_msg_context().
 */
static void release_msg_context(struct mmal_msg_context *msg_context)
{
	kfree(msg_context);
}
183 /* deals with receipt of event to host message */
184 static void event_to_host_cb(struct vchiq_mmal_instance
*instance
,
185 struct mmal_msg
*msg
, u32 msg_len
)
187 pr_debug("unhandled event\n");
188 pr_debug("component:%p port type:%d num:%d cmd:0x%x length:%d\n",
189 msg
->u
.event_to_host
.client_component
,
190 msg
->u
.event_to_host
.port_type
,
191 msg
->u
.event_to_host
.port_num
,
192 msg
->u
.event_to_host
.cmd
, msg
->u
.event_to_host
.length
);
195 /* workqueue scheduled callback
197 * we do this because it is important we do not call any other vchiq
198 * sync calls from witin the message delivery thread
200 static void buffer_work_cb(struct work_struct
*work
)
202 struct mmal_msg_context
*msg_context
= (struct mmal_msg_context
*)work
;
204 msg_context
->u
.bulk
.port
->buffer_cb(msg_context
->u
.bulk
.instance
,
205 msg_context
->u
.bulk
.port
,
206 msg_context
->u
.bulk
.status
,
207 msg_context
->u
.bulk
.buffer
,
208 msg_context
->u
.bulk
.buffer_used
,
209 msg_context
->u
.bulk
.mmal_flags
,
210 msg_context
->u
.bulk
.dts
,
211 msg_context
->u
.bulk
.pts
);
213 /* release message context */
214 release_msg_context(msg_context
);
217 /* enqueue a bulk receive for a given message context */
218 static int bulk_receive(struct vchiq_mmal_instance
*instance
,
219 struct mmal_msg
*msg
,
220 struct mmal_msg_context
*msg_context
)
222 unsigned long rd_len
;
223 unsigned long flags
= 0;
226 /* bulk mutex stops other bulk operations while we have a
227 * receive in progress - released in callback
229 ret
= mutex_lock_interruptible(&instance
->bulk_mutex
);
233 rd_len
= msg
->u
.buffer_from_host
.buffer_header
.length
;
235 /* take buffer from queue */
236 spin_lock_irqsave(&msg_context
->u
.bulk
.port
->slock
, flags
);
237 if (list_empty(&msg_context
->u
.bulk
.port
->buffers
)) {
238 spin_unlock_irqrestore(&msg_context
->u
.bulk
.port
->slock
, flags
);
239 pr_err("buffer list empty trying to submit bulk receive\n");
241 /* todo: this is a serious error, we should never have
242 * commited a buffer_to_host operation to the mmal
243 * port without the buffer to back it up (underflow
244 * handling) and there is no obvious way to deal with
245 * this - how is the mmal servie going to react when
246 * we fail to do the xfer and reschedule a buffer when
247 * it arrives? perhaps a starved flag to indicate a
248 * waiting bulk receive?
251 mutex_unlock(&instance
->bulk_mutex
);
256 msg_context
->u
.bulk
.buffer
=
257 list_entry(msg_context
->u
.bulk
.port
->buffers
.next
,
258 struct mmal_buffer
, list
);
259 list_del(&msg_context
->u
.bulk
.buffer
->list
);
261 spin_unlock_irqrestore(&msg_context
->u
.bulk
.port
->slock
, flags
);
263 /* ensure we do not overrun the available buffer */
264 if (rd_len
> msg_context
->u
.bulk
.buffer
->buffer_size
) {
265 rd_len
= msg_context
->u
.bulk
.buffer
->buffer_size
;
266 pr_warn("short read as not enough receive buffer space\n");
267 /* todo: is this the correct response, what happens to
268 * the rest of the message data?
273 msg_context
->u
.bulk
.buffer_used
= rd_len
;
274 msg_context
->u
.bulk
.mmal_flags
=
275 msg
->u
.buffer_from_host
.buffer_header
.flags
;
276 msg_context
->u
.bulk
.dts
= msg
->u
.buffer_from_host
.buffer_header
.dts
;
277 msg_context
->u
.bulk
.pts
= msg
->u
.buffer_from_host
.buffer_header
.pts
;
279 // only need to flush L1 cache here, as VCHIQ takes care of the L2
281 __cpuc_flush_dcache_area(msg_context
->u
.bulk
.buffer
->buffer
, rd_len
);
283 /* queue the bulk submission */
284 vchi_service_use(instance
->handle
);
285 ret
= vchi_bulk_queue_receive(instance
->handle
,
286 msg_context
->u
.bulk
.buffer
->buffer
,
287 /* Actual receive needs to be a multiple
291 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
|
292 VCHI_FLAGS_BLOCK_UNTIL_QUEUED
,
295 vchi_service_release(instance
->handle
);
298 /* callback will not be clearing the mutex */
299 mutex_unlock(&instance
->bulk_mutex
);
305 /* enque a dummy bulk receive for a given message context */
306 static int dummy_bulk_receive(struct vchiq_mmal_instance
*instance
,
307 struct mmal_msg_context
*msg_context
)
311 /* bulk mutex stops other bulk operations while we have a
312 * receive in progress - released in callback
314 ret
= mutex_lock_interruptible(&instance
->bulk_mutex
);
318 /* zero length indicates this was a dummy transfer */
319 msg_context
->u
.bulk
.buffer_used
= 0;
321 /* queue the bulk submission */
322 vchi_service_use(instance
->handle
);
324 ret
= vchi_bulk_queue_receive(instance
->handle
,
325 instance
->bulk_scratch
,
327 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
|
328 VCHI_FLAGS_BLOCK_UNTIL_QUEUED
,
331 vchi_service_release(instance
->handle
);
334 /* callback will not be clearing the mutex */
335 mutex_unlock(&instance
->bulk_mutex
);
341 /* data in message, memcpy from packet into output buffer */
342 static int inline_receive(struct vchiq_mmal_instance
*instance
,
343 struct mmal_msg
*msg
,
344 struct mmal_msg_context
*msg_context
)
346 unsigned long flags
= 0;
348 /* take buffer from queue */
349 spin_lock_irqsave(&msg_context
->u
.bulk
.port
->slock
, flags
);
350 if (list_empty(&msg_context
->u
.bulk
.port
->buffers
)) {
351 spin_unlock_irqrestore(&msg_context
->u
.bulk
.port
->slock
, flags
);
352 pr_err("buffer list empty trying to receive inline\n");
354 /* todo: this is a serious error, we should never have
355 * commited a buffer_to_host operation to the mmal
356 * port without the buffer to back it up (with
357 * underflow handling) and there is no obvious way to
358 * deal with this. Less bad than the bulk case as we
359 * can just drop this on the floor but...unhelpful
364 msg_context
->u
.bulk
.buffer
=
365 list_entry(msg_context
->u
.bulk
.port
->buffers
.next
,
366 struct mmal_buffer
, list
);
367 list_del(&msg_context
->u
.bulk
.buffer
->list
);
369 spin_unlock_irqrestore(&msg_context
->u
.bulk
.port
->slock
, flags
);
371 memcpy(msg_context
->u
.bulk
.buffer
->buffer
,
372 msg
->u
.buffer_from_host
.short_data
,
373 msg
->u
.buffer_from_host
.payload_in_message
);
375 msg_context
->u
.bulk
.buffer_used
=
376 msg
->u
.buffer_from_host
.payload_in_message
;
381 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
383 buffer_from_host(struct vchiq_mmal_instance
*instance
,
384 struct vchiq_mmal_port
*port
, struct mmal_buffer
*buf
)
386 struct mmal_msg_context
*msg_context
;
390 pr_debug("instance:%p buffer:%p\n", instance
->handle
, buf
);
392 /* bulk mutex stops other bulk operations while we
393 * have a receive in progress
395 if (mutex_lock_interruptible(&instance
->bulk_mutex
))
399 msg_context
= get_msg_context(instance
);
400 if (msg_context
== NULL
)
403 /* store bulk message context for when data arrives */
404 msg_context
->u
.bulk
.instance
= instance
;
405 msg_context
->u
.bulk
.port
= port
;
406 msg_context
->u
.bulk
.buffer
= NULL
; /* not valid until bulk xfer */
407 msg_context
->u
.bulk
.buffer_used
= 0;
409 /* initialise work structure ready to schedule callback */
410 INIT_WORK(&msg_context
->u
.bulk
.work
, buffer_work_cb
);
412 /* prep the buffer from host message */
413 memset(&m
, 0xbc, sizeof(m
)); /* just to make debug clearer */
415 m
.h
.type
= MMAL_MSG_TYPE_BUFFER_FROM_HOST
;
416 m
.h
.magic
= MMAL_MAGIC
;
417 m
.h
.context
= msg_context
;
420 /* drvbuf is our private data passed back */
421 m
.u
.buffer_from_host
.drvbuf
.magic
= MMAL_MAGIC
;
422 m
.u
.buffer_from_host
.drvbuf
.component_handle
= port
->component
->handle
;
423 m
.u
.buffer_from_host
.drvbuf
.port_handle
= port
->handle
;
424 m
.u
.buffer_from_host
.drvbuf
.client_context
= msg_context
;
427 m
.u
.buffer_from_host
.buffer_header
.cmd
= 0;
428 m
.u
.buffer_from_host
.buffer_header
.data
= buf
->buffer
;
429 m
.u
.buffer_from_host
.buffer_header
.alloc_size
= buf
->buffer_size
;
430 m
.u
.buffer_from_host
.buffer_header
.length
= 0; /* nothing used yet */
431 m
.u
.buffer_from_host
.buffer_header
.offset
= 0; /* no offset */
432 m
.u
.buffer_from_host
.buffer_header
.flags
= 0; /* no flags */
433 m
.u
.buffer_from_host
.buffer_header
.pts
= MMAL_TIME_UNKNOWN
;
434 m
.u
.buffer_from_host
.buffer_header
.dts
= MMAL_TIME_UNKNOWN
;
436 /* clear buffer type sepecific data */
437 memset(&m
.u
.buffer_from_host
.buffer_header_type_specific
, 0,
438 sizeof(m
.u
.buffer_from_host
.buffer_header_type_specific
));
440 /* no payload in message */
441 m
.u
.buffer_from_host
.payload_in_message
= 0;
443 vchi_service_use(instance
->handle
);
445 ret
= vchi_msg_queue(instance
->handle
, &m
,
446 sizeof(struct mmal_msg_header
) +
447 sizeof(m
.u
.buffer_from_host
),
448 VCHI_FLAGS_BLOCK_UNTIL_QUEUED
, NULL
);
451 release_msg_context(msg_context
);
452 /* todo: is this correct error value? */
455 vchi_service_release(instance
->handle
);
457 mutex_unlock(&instance
->bulk_mutex
);
462 /* submit a buffer to the mmal sevice
464 * the buffer_from_host uses size data from the ports next available
465 * mmal_buffer and deals with there being no buffer available by
466 * incrementing the underflow for later
468 static int port_buffer_from_host(struct vchiq_mmal_instance
*instance
,
469 struct vchiq_mmal_port
*port
)
472 struct mmal_buffer
*buf
;
473 unsigned long flags
= 0;
478 /* peek buffer from queue */
479 spin_lock_irqsave(&port
->slock
, flags
);
480 if (list_empty(&port
->buffers
)) {
481 port
->buffer_underflow
++;
482 spin_unlock_irqrestore(&port
->slock
, flags
);
486 buf
= list_entry(port
->buffers
.next
, struct mmal_buffer
, list
);
488 spin_unlock_irqrestore(&port
->slock
, flags
);
490 /* issue buffer to mmal service */
491 ret
= buffer_from_host(instance
, port
, buf
);
493 pr_err("adding buffer header failed\n");
494 /* todo: how should this be dealt with */
500 /* deals with receipt of buffer to host message */
501 static void buffer_to_host_cb(struct vchiq_mmal_instance
*instance
,
502 struct mmal_msg
*msg
, u32 msg_len
)
504 struct mmal_msg_context
*msg_context
;
506 pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
507 instance
, msg
, msg_len
);
509 if (msg
->u
.buffer_from_host
.drvbuf
.magic
== MMAL_MAGIC
) {
510 msg_context
= msg
->u
.buffer_from_host
.drvbuf
.client_context
;
512 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
516 if (msg
->h
.status
!= MMAL_MSG_STATUS_SUCCESS
) {
517 /* message reception had an error */
518 pr_warn("error %d in reply\n", msg
->h
.status
);
520 msg_context
->u
.bulk
.status
= msg
->h
.status
;
522 } else if (msg
->u
.buffer_from_host
.buffer_header
.length
== 0) {
524 if (msg
->u
.buffer_from_host
.buffer_header
.flags
&
525 MMAL_BUFFER_HEADER_FLAG_EOS
) {
526 msg_context
->u
.bulk
.status
=
527 dummy_bulk_receive(instance
, msg_context
);
528 if (msg_context
->u
.bulk
.status
== 0)
529 return; /* successful bulk submission, bulk
530 * completion will trigger callback
533 /* do callback with empty buffer - not EOS though */
534 msg_context
->u
.bulk
.status
= 0;
535 msg_context
->u
.bulk
.buffer_used
= 0;
537 } else if (msg
->u
.buffer_from_host
.payload_in_message
== 0) {
538 /* data is not in message, queue a bulk receive */
539 msg_context
->u
.bulk
.status
=
540 bulk_receive(instance
, msg
, msg_context
);
541 if (msg_context
->u
.bulk
.status
== 0)
542 return; /* successful bulk submission, bulk
543 * completion will trigger callback
546 /* failed to submit buffer, this will end badly */
547 pr_err("error %d on bulk submission\n",
548 msg_context
->u
.bulk
.status
);
550 } else if (msg
->u
.buffer_from_host
.payload_in_message
<=
551 MMAL_VC_SHORT_DATA
) {
552 /* data payload within message */
553 msg_context
->u
.bulk
.status
= inline_receive(instance
, msg
,
556 pr_err("message with invalid short payload\n");
559 msg_context
->u
.bulk
.status
= -EINVAL
;
560 msg_context
->u
.bulk
.buffer_used
=
561 msg
->u
.buffer_from_host
.payload_in_message
;
564 /* replace the buffer header */
565 port_buffer_from_host(instance
, msg_context
->u
.bulk
.port
);
567 /* schedule the port callback */
568 schedule_work(&msg_context
->u
.bulk
.work
);
571 static void bulk_receive_cb(struct vchiq_mmal_instance
*instance
,
572 struct mmal_msg_context
*msg_context
)
574 /* bulk receive operation complete */
575 mutex_unlock(&msg_context
->u
.bulk
.instance
->bulk_mutex
);
577 /* replace the buffer header */
578 port_buffer_from_host(msg_context
->u
.bulk
.instance
,
579 msg_context
->u
.bulk
.port
);
581 msg_context
->u
.bulk
.status
= 0;
583 /* schedule the port callback */
584 schedule_work(&msg_context
->u
.bulk
.work
);
587 static void bulk_abort_cb(struct vchiq_mmal_instance
*instance
,
588 struct mmal_msg_context
*msg_context
)
590 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__
, msg_context
);
592 /* bulk receive operation complete */
593 mutex_unlock(&msg_context
->u
.bulk
.instance
->bulk_mutex
);
595 /* replace the buffer header */
596 port_buffer_from_host(msg_context
->u
.bulk
.instance
,
597 msg_context
->u
.bulk
.port
);
599 msg_context
->u
.bulk
.status
= -EINTR
;
601 schedule_work(&msg_context
->u
.bulk
.work
);
604 /* incoming event service callback */
605 static void service_callback(void *param
,
606 const VCHI_CALLBACK_REASON_T reason
,
609 struct vchiq_mmal_instance
*instance
= param
;
612 struct mmal_msg
*msg
;
613 VCHI_HELD_MSG_T msg_handle
;
616 pr_err("Message callback passed NULL instance\n");
621 case VCHI_CALLBACK_MSG_AVAILABLE
:
622 status
= vchi_msg_hold(instance
->handle
, (void **)&msg
,
623 &msg_len
, VCHI_FLAGS_NONE
, &msg_handle
);
625 pr_err("Unable to dequeue a message (%d)\n", status
);
629 DBG_DUMP_MSG(msg
, msg_len
, "<<< reply message");
631 /* handling is different for buffer messages */
632 switch (msg
->h
.type
) {
634 case MMAL_MSG_TYPE_BUFFER_FROM_HOST
:
635 vchi_held_msg_release(&msg_handle
);
638 case MMAL_MSG_TYPE_EVENT_TO_HOST
:
639 event_to_host_cb(instance
, msg
, msg_len
);
640 vchi_held_msg_release(&msg_handle
);
644 case MMAL_MSG_TYPE_BUFFER_TO_HOST
:
645 buffer_to_host_cb(instance
, msg
, msg_len
);
646 vchi_held_msg_release(&msg_handle
);
650 /* messages dependant on header context to complete */
652 /* todo: the msg.context really ought to be sanity
653 * checked before we just use it, afaict it comes back
654 * and is used raw from the videocore. Perhaps it
655 * should be verified the address lies in the kernel
658 if (msg
->h
.context
== NULL
) {
659 pr_err("received message context was null!\n");
660 vchi_held_msg_release(&msg_handle
);
664 /* fill in context values */
665 msg
->h
.context
->u
.sync
.msg_handle
= msg_handle
;
666 msg
->h
.context
->u
.sync
.msg
= msg
;
667 msg
->h
.context
->u
.sync
.msg_len
= msg_len
;
669 /* todo: should this check (completion_done()
670 * == 1) for no one waiting? or do we need a
671 * flag to tell us the completion has been
672 * interrupted so we can free the message and
673 * its context. This probably also solves the
674 * message arriving after interruption todo
678 /* complete message so caller knows it happened */
679 complete(&msg
->h
.context
->u
.sync
.cmplt
);
685 case VCHI_CALLBACK_BULK_RECEIVED
:
686 bulk_receive_cb(instance
, bulk_ctx
);
689 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED
:
690 bulk_abort_cb(instance
, bulk_ctx
);
693 case VCHI_CALLBACK_SERVICE_CLOSED
:
694 /* TODO: consider if this requires action if received when
695 * driver is not explicitly closing the service
700 pr_err("Received unhandled message reason %d\n", reason
);
705 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance
*instance
,
706 struct mmal_msg
*msg
,
707 unsigned int payload_len
,
708 struct mmal_msg
**msg_out
,
709 VCHI_HELD_MSG_T
*msg_handle_out
)
711 struct mmal_msg_context msg_context
;
714 /* payload size must not cause message to exceed max size */
716 (MMAL_MSG_MAX_SIZE
- sizeof(struct mmal_msg_header
))) {
717 pr_err("payload length %d exceeds max:%d\n", payload_len
,
718 (MMAL_MSG_MAX_SIZE
- sizeof(struct mmal_msg_header
)));
722 init_completion(&msg_context
.u
.sync
.cmplt
);
724 msg
->h
.magic
= MMAL_MAGIC
;
725 msg
->h
.context
= &msg_context
;
728 DBG_DUMP_MSG(msg
, (sizeof(struct mmal_msg_header
) + payload_len
),
731 vchi_service_use(instance
->handle
);
733 ret
= vchi_msg_queue(instance
->handle
,
735 sizeof(struct mmal_msg_header
) + payload_len
,
736 VCHI_FLAGS_BLOCK_UNTIL_QUEUED
, NULL
);
738 vchi_service_release(instance
->handle
);
741 pr_err("error %d queuing message\n", ret
);
745 ret
= wait_for_completion_timeout(&msg_context
.u
.sync
.cmplt
, 3*HZ
);
747 pr_err("error %d waiting for sync completion\n", ret
);
750 /* todo: what happens if the message arrives after aborting */
754 *msg_out
= msg_context
.u
.sync
.msg
;
755 *msg_handle_out
= msg_context
.u
.sync
.msg_handle
;
760 static void dump_port_info(struct vchiq_mmal_port
*port
)
762 pr_debug("port handle:0x%x enabled:%d\n", port
->handle
, port
->enabled
);
764 pr_debug("buffer minimum num:%d size:%d align:%d\n",
765 port
->minimum_buffer
.num
,
766 port
->minimum_buffer
.size
, port
->minimum_buffer
.alignment
);
768 pr_debug("buffer recommended num:%d size:%d align:%d\n",
769 port
->recommended_buffer
.num
,
770 port
->recommended_buffer
.size
,
771 port
->recommended_buffer
.alignment
);
773 pr_debug("buffer current values num:%d size:%d align:%d\n",
774 port
->current_buffer
.num
,
775 port
->current_buffer
.size
, port
->current_buffer
.alignment
);
777 pr_debug("elementry stream: type:%d encoding:0x%x varient:0x%x\n",
779 port
->format
.encoding
, port
->format
.encoding_variant
);
781 pr_debug(" bitrate:%d flags:0x%x\n",
782 port
->format
.bitrate
, port
->format
.flags
);
784 if (port
->format
.type
== MMAL_ES_TYPE_VIDEO
) {
786 ("es video format: width:%d height:%d colourspace:0x%x\n",
787 port
->es
.video
.width
, port
->es
.video
.height
,
788 port
->es
.video
.color_space
);
790 pr_debug(" : crop xywh %d,%d,%d,%d\n",
791 port
->es
.video
.crop
.x
,
792 port
->es
.video
.crop
.y
,
793 port
->es
.video
.crop
.width
, port
->es
.video
.crop
.height
);
794 pr_debug(" : framerate %d/%d aspect %d/%d\n",
795 port
->es
.video
.frame_rate
.num
,
796 port
->es
.video
.frame_rate
.den
,
797 port
->es
.video
.par
.num
, port
->es
.video
.par
.den
);
801 static void port_to_mmal_msg(struct vchiq_mmal_port
*port
, struct mmal_port
*p
)
804 /* todo do readonly fields need setting at all? */
805 p
->type
= port
->type
;
806 p
->index
= port
->index
;
808 p
->is_enabled
= port
->enabled
;
809 p
->buffer_num_min
= port
->minimum_buffer
.num
;
810 p
->buffer_size_min
= port
->minimum_buffer
.size
;
811 p
->buffer_alignment_min
= port
->minimum_buffer
.alignment
;
812 p
->buffer_num_recommended
= port
->recommended_buffer
.num
;
813 p
->buffer_size_recommended
= port
->recommended_buffer
.size
;
815 /* only three writable fields in a port */
816 p
->buffer_num
= port
->current_buffer
.num
;
817 p
->buffer_size
= port
->current_buffer
.size
;
821 static int port_info_set(struct vchiq_mmal_instance
*instance
,
822 struct vchiq_mmal_port
*port
)
826 struct mmal_msg
*rmsg
;
827 VCHI_HELD_MSG_T rmsg_handle
;
829 pr_debug("setting port info port %p\n", port
);
832 dump_port_info(port
);
834 m
.h
.type
= MMAL_MSG_TYPE_PORT_INFO_SET
;
836 m
.u
.port_info_set
.component_handle
= port
->component
->handle
;
837 m
.u
.port_info_set
.port_type
= port
->type
;
838 m
.u
.port_info_set
.port_index
= port
->index
;
840 port_to_mmal_msg(port
, &m
.u
.port_info_set
.port
);
842 /* elementry stream format setup */
843 m
.u
.port_info_set
.format
.type
= port
->format
.type
;
844 m
.u
.port_info_set
.format
.encoding
= port
->format
.encoding
;
845 m
.u
.port_info_set
.format
.encoding_variant
=
846 port
->format
.encoding_variant
;
847 m
.u
.port_info_set
.format
.bitrate
= port
->format
.bitrate
;
848 m
.u
.port_info_set
.format
.flags
= port
->format
.flags
;
850 memcpy(&m
.u
.port_info_set
.es
, &port
->es
,
851 sizeof(union mmal_es_specific_format
));
853 m
.u
.port_info_set
.format
.extradata_size
= port
->format
.extradata_size
;
854 memcpy(&m
.u
.port_info_set
.extradata
, port
->format
.extradata
,
855 port
->format
.extradata_size
);
857 ret
= send_synchronous_mmal_msg(instance
, &m
,
858 sizeof(m
.u
.port_info_set
),
859 &rmsg
, &rmsg_handle
);
863 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_INFO_SET
) {
864 /* got an unexpected message type in reply */
869 /* return operation status */
870 ret
= -rmsg
->u
.port_info_get_reply
.status
;
872 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__
, ret
,
873 port
->component
->handle
, port
->handle
);
876 vchi_held_msg_release(&rmsg_handle
);
882 /* use port info get message to retrive port information */
883 static int port_info_get(struct vchiq_mmal_instance
*instance
,
884 struct vchiq_mmal_port
*port
)
888 struct mmal_msg
*rmsg
;
889 VCHI_HELD_MSG_T rmsg_handle
;
892 m
.h
.type
= MMAL_MSG_TYPE_PORT_INFO_GET
;
893 m
.u
.port_info_get
.component_handle
= port
->component
->handle
;
894 m
.u
.port_info_get
.port_type
= port
->type
;
895 m
.u
.port_info_get
.index
= port
->index
;
897 ret
= send_synchronous_mmal_msg(instance
, &m
,
898 sizeof(m
.u
.port_info_get
),
899 &rmsg
, &rmsg_handle
);
903 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_INFO_GET
) {
904 /* got an unexpected message type in reply */
909 /* return operation status */
910 ret
= -rmsg
->u
.port_info_get_reply
.status
;
911 if (ret
!= MMAL_MSG_STATUS_SUCCESS
)
914 if (rmsg
->u
.port_info_get_reply
.port
.is_enabled
== 0)
915 port
->enabled
= false;
917 port
->enabled
= true;
919 /* copy the values out of the message */
920 port
->handle
= rmsg
->u
.port_info_get_reply
.port_handle
;
922 /* port type and index cached to use on port info set becuase
923 * it does not use a port handle
925 port
->type
= rmsg
->u
.port_info_get_reply
.port_type
;
926 port
->index
= rmsg
->u
.port_info_get_reply
.port_index
;
928 port
->minimum_buffer
.num
=
929 rmsg
->u
.port_info_get_reply
.port
.buffer_num_min
;
930 port
->minimum_buffer
.size
=
931 rmsg
->u
.port_info_get_reply
.port
.buffer_size_min
;
932 port
->minimum_buffer
.alignment
=
933 rmsg
->u
.port_info_get_reply
.port
.buffer_alignment_min
;
935 port
->recommended_buffer
.alignment
=
936 rmsg
->u
.port_info_get_reply
.port
.buffer_alignment_min
;
937 port
->recommended_buffer
.num
=
938 rmsg
->u
.port_info_get_reply
.port
.buffer_num_recommended
;
940 port
->current_buffer
.num
= rmsg
->u
.port_info_get_reply
.port
.buffer_num
;
941 port
->current_buffer
.size
=
942 rmsg
->u
.port_info_get_reply
.port
.buffer_size
;
945 port
->format
.type
= rmsg
->u
.port_info_get_reply
.format
.type
;
946 port
->format
.encoding
= rmsg
->u
.port_info_get_reply
.format
.encoding
;
947 port
->format
.encoding_variant
=
948 rmsg
->u
.port_info_get_reply
.format
.encoding_variant
;
949 port
->format
.bitrate
= rmsg
->u
.port_info_get_reply
.format
.bitrate
;
950 port
->format
.flags
= rmsg
->u
.port_info_get_reply
.format
.flags
;
952 /* elementry stream format */
954 &rmsg
->u
.port_info_get_reply
.es
,
955 sizeof(union mmal_es_specific_format
));
956 port
->format
.es
= &port
->es
;
958 port
->format
.extradata_size
=
959 rmsg
->u
.port_info_get_reply
.format
.extradata_size
;
960 memcpy(port
->format
.extradata
,
961 rmsg
->u
.port_info_get_reply
.extradata
,
962 port
->format
.extradata_size
);
964 pr_debug("received port info\n");
965 dump_port_info(port
);
969 pr_debug("%s:result:%d component:0x%x port:%d\n",
970 __func__
, ret
, port
->component
->handle
, port
->handle
);
972 vchi_held_msg_release(&rmsg_handle
);
977 /* create comonent on vc */
978 static int create_component(struct vchiq_mmal_instance
*instance
,
979 struct vchiq_mmal_component
*component
,
984 struct mmal_msg
*rmsg
;
985 VCHI_HELD_MSG_T rmsg_handle
;
987 /* build component create message */
988 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_CREATE
;
989 m
.u
.component_create
.client_component
= component
;
990 strncpy(m
.u
.component_create
.name
, name
,
991 sizeof(m
.u
.component_create
.name
));
993 ret
= send_synchronous_mmal_msg(instance
, &m
,
994 sizeof(m
.u
.component_create
),
995 &rmsg
, &rmsg_handle
);
999 if (rmsg
->h
.type
!= m
.h
.type
) {
1000 /* got an unexpected message type in reply */
1005 ret
= -rmsg
->u
.component_create_reply
.status
;
1006 if (ret
!= MMAL_MSG_STATUS_SUCCESS
)
1009 /* a valid component response received */
1010 component
->handle
= rmsg
->u
.component_create_reply
.component_handle
;
1011 component
->inputs
= rmsg
->u
.component_create_reply
.input_num
;
1012 component
->outputs
= rmsg
->u
.component_create_reply
.output_num
;
1013 component
->clocks
= rmsg
->u
.component_create_reply
.clock_num
;
1015 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1017 component
->inputs
, component
->outputs
, component
->clocks
);
1020 vchi_held_msg_release(&rmsg_handle
);
1025 /* destroys a component on vc */
1026 static int destroy_component(struct vchiq_mmal_instance
*instance
,
1027 struct vchiq_mmal_component
*component
)
1031 struct mmal_msg
*rmsg
;
1032 VCHI_HELD_MSG_T rmsg_handle
;
1034 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_DESTROY
;
1035 m
.u
.component_destroy
.component_handle
= component
->handle
;
1037 ret
= send_synchronous_mmal_msg(instance
, &m
,
1038 sizeof(m
.u
.component_destroy
),
1039 &rmsg
, &rmsg_handle
);
1043 if (rmsg
->h
.type
!= m
.h
.type
) {
1044 /* got an unexpected message type in reply */
1049 ret
= -rmsg
->u
.component_destroy_reply
.status
;
1053 vchi_held_msg_release(&rmsg_handle
);
1058 /* enable a component on vc */
1059 static int enable_component(struct vchiq_mmal_instance
*instance
,
1060 struct vchiq_mmal_component
*component
)
1064 struct mmal_msg
*rmsg
;
1065 VCHI_HELD_MSG_T rmsg_handle
;
1067 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_ENABLE
;
1068 m
.u
.component_enable
.component_handle
= component
->handle
;
1070 ret
= send_synchronous_mmal_msg(instance
, &m
,
1071 sizeof(m
.u
.component_enable
),
1072 &rmsg
, &rmsg_handle
);
1076 if (rmsg
->h
.type
!= m
.h
.type
) {
1077 /* got an unexpected message type in reply */
1082 ret
= -rmsg
->u
.component_enable_reply
.status
;
1085 vchi_held_msg_release(&rmsg_handle
);
1090 /* disable a component on vc */
1091 static int disable_component(struct vchiq_mmal_instance
*instance
,
1092 struct vchiq_mmal_component
*component
)
1096 struct mmal_msg
*rmsg
;
1097 VCHI_HELD_MSG_T rmsg_handle
;
1099 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_DISABLE
;
1100 m
.u
.component_disable
.component_handle
= component
->handle
;
1102 ret
= send_synchronous_mmal_msg(instance
, &m
,
1103 sizeof(m
.u
.component_disable
),
1104 &rmsg
, &rmsg_handle
);
1108 if (rmsg
->h
.type
!= m
.h
.type
) {
1109 /* got an unexpected message type in reply */
1114 ret
= -rmsg
->u
.component_disable_reply
.status
;
1118 vchi_held_msg_release(&rmsg_handle
);
1123 /* get version of mmal implementation */
1124 static int get_version(struct vchiq_mmal_instance
*instance
,
1125 u32
*major_out
, u32
*minor_out
)
1129 struct mmal_msg
*rmsg
;
1130 VCHI_HELD_MSG_T rmsg_handle
;
1132 m
.h
.type
= MMAL_MSG_TYPE_GET_VERSION
;
1134 ret
= send_synchronous_mmal_msg(instance
, &m
,
1135 sizeof(m
.u
.version
),
1136 &rmsg
, &rmsg_handle
);
1140 if (rmsg
->h
.type
!= m
.h
.type
) {
1141 /* got an unexpected message type in reply */
1146 *major_out
= rmsg
->u
.version
.major
;
1147 *minor_out
= rmsg
->u
.version
.minor
;
1150 vchi_held_msg_release(&rmsg_handle
);
1155 /* do a port action with a port as a parameter */
1156 static int port_action_port(struct vchiq_mmal_instance
*instance
,
1157 struct vchiq_mmal_port
*port
,
1158 enum mmal_msg_port_action_type action_type
)
1162 struct mmal_msg
*rmsg
;
1163 VCHI_HELD_MSG_T rmsg_handle
;
1165 m
.h
.type
= MMAL_MSG_TYPE_PORT_ACTION
;
1166 m
.u
.port_action_port
.component_handle
= port
->component
->handle
;
1167 m
.u
.port_action_port
.port_handle
= port
->handle
;
1168 m
.u
.port_action_port
.action
= action_type
;
1170 port_to_mmal_msg(port
, &m
.u
.port_action_port
.port
);
1172 ret
= send_synchronous_mmal_msg(instance
, &m
,
1173 sizeof(m
.u
.port_action_port
),
1174 &rmsg
, &rmsg_handle
);
1178 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_ACTION
) {
1179 /* got an unexpected message type in reply */
1184 ret
= -rmsg
->u
.port_action_reply
.status
;
1186 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1188 ret
, port
->component
->handle
, port
->handle
,
1189 port_action_type_names
[action_type
], action_type
);
1192 vchi_held_msg_release(&rmsg_handle
);
1197 /* do a port action with handles as parameters */
1198 static int port_action_handle(struct vchiq_mmal_instance
*instance
,
1199 struct vchiq_mmal_port
*port
,
1200 enum mmal_msg_port_action_type action_type
,
1201 u32 connect_component_handle
,
1202 u32 connect_port_handle
)
1206 struct mmal_msg
*rmsg
;
1207 VCHI_HELD_MSG_T rmsg_handle
;
1209 m
.h
.type
= MMAL_MSG_TYPE_PORT_ACTION
;
1211 m
.u
.port_action_handle
.component_handle
= port
->component
->handle
;
1212 m
.u
.port_action_handle
.port_handle
= port
->handle
;
1213 m
.u
.port_action_handle
.action
= action_type
;
1215 m
.u
.port_action_handle
.connect_component_handle
=
1216 connect_component_handle
;
1217 m
.u
.port_action_handle
.connect_port_handle
= connect_port_handle
;
1219 ret
= send_synchronous_mmal_msg(instance
, &m
,
1220 sizeof(m
.u
.port_action_handle
),
1221 &rmsg
, &rmsg_handle
);
1225 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_ACTION
) {
1226 /* got an unexpected message type in reply */
1231 ret
= -rmsg
->u
.port_action_reply
.status
;
1233 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
1234 " connect component:0x%x connect port:%d\n",
1236 ret
, port
->component
->handle
, port
->handle
,
1237 port_action_type_names
[action_type
],
1238 action_type
, connect_component_handle
, connect_port_handle
);
1241 vchi_held_msg_release(&rmsg_handle
);
1246 static int port_parameter_set(struct vchiq_mmal_instance
*instance
,
1247 struct vchiq_mmal_port
*port
,
1248 u32 parameter_id
, void *value
, u32 value_size
)
1252 struct mmal_msg
*rmsg
;
1253 VCHI_HELD_MSG_T rmsg_handle
;
1255 m
.h
.type
= MMAL_MSG_TYPE_PORT_PARAMETER_SET
;
1257 m
.u
.port_parameter_set
.component_handle
= port
->component
->handle
;
1258 m
.u
.port_parameter_set
.port_handle
= port
->handle
;
1259 m
.u
.port_parameter_set
.id
= parameter_id
;
1260 m
.u
.port_parameter_set
.size
= (2 * sizeof(u32
)) + value_size
;
1261 memcpy(&m
.u
.port_parameter_set
.value
, value
, value_size
);
1263 ret
= send_synchronous_mmal_msg(instance
, &m
,
1264 (4 * sizeof(u32
)) + value_size
,
1265 &rmsg
, &rmsg_handle
);
1269 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_PARAMETER_SET
) {
1270 /* got an unexpected message type in reply */
1275 ret
= -rmsg
->u
.port_parameter_set_reply
.status
;
1277 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1279 ret
, port
->component
->handle
, port
->handle
, parameter_id
);
1282 vchi_held_msg_release(&rmsg_handle
);
1287 static int port_parameter_get(struct vchiq_mmal_instance
*instance
,
1288 struct vchiq_mmal_port
*port
,
1289 u32 parameter_id
, void *value
, u32
*value_size
)
1293 struct mmal_msg
*rmsg
;
1294 VCHI_HELD_MSG_T rmsg_handle
;
1297 m
.h
.type
= MMAL_MSG_TYPE_PORT_PARAMETER_GET
;
1299 m
.u
.port_parameter_get
.component_handle
= port
->component
->handle
;
1300 m
.u
.port_parameter_get
.port_handle
= port
->handle
;
1301 m
.u
.port_parameter_get
.id
= parameter_id
;
1302 m
.u
.port_parameter_get
.size
= (2 * sizeof(u32
)) + *value_size
;
1304 ret
= send_synchronous_mmal_msg(instance
, &m
,
1306 mmal_msg_port_parameter_get
),
1307 &rmsg
, &rmsg_handle
);
1311 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_PARAMETER_GET
) {
1312 /* got an unexpected message type in reply */
1313 pr_err("Incorrect reply type %d\n", rmsg
->h
.type
);
1318 ret
= -rmsg
->u
.port_parameter_get_reply
.status
;
1320 * port_parameter_get_reply.size includes the header,
1321 * whilst *value_size doesn't.
1323 reply_size
= rmsg
->u
.port_parameter_get_reply
.size
- (2 * sizeof(u32
));
1325 if (ret
|| (reply_size
> *value_size
)) {
1326 /* Copy only as much as we have space for
1327 * but report true size of parameter
1329 memcpy(value
, &rmsg
->u
.port_parameter_get_reply
.value
,
1332 memcpy(value
, &rmsg
->u
.port_parameter_get_reply
.value
,
1336 * Return amount of data copied if big enough,
1337 * or wanted if not big enough.
1339 *value_size
= reply_size
;
1341 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__
,
1342 ret
, port
->component
->handle
, port
->handle
, parameter_id
);
1345 vchi_held_msg_release(&rmsg_handle
);
1350 /* disables a port and drains buffers from it */
1351 static int port_disable(struct vchiq_mmal_instance
*instance
,
1352 struct vchiq_mmal_port
*port
)
1355 struct list_head
*q
, *buf_head
;
1356 unsigned long flags
= 0;
1361 port
->enabled
= false;
1363 ret
= port_action_port(instance
, port
,
1364 MMAL_MSG_PORT_ACTION_TYPE_DISABLE
);
1367 /* drain all queued buffers on port */
1368 spin_lock_irqsave(&port
->slock
, flags
);
1370 list_for_each_safe(buf_head
, q
, &port
->buffers
) {
1371 struct mmal_buffer
*mmalbuf
;
1372 mmalbuf
= list_entry(buf_head
, struct mmal_buffer
,
1375 if (port
->buffer_cb
)
1376 port
->buffer_cb(instance
,
1377 port
, 0, mmalbuf
, 0, 0,
1382 spin_unlock_irqrestore(&port
->slock
, flags
);
1384 ret
= port_info_get(instance
, port
);
1391 static int port_enable(struct vchiq_mmal_instance
*instance
,
1392 struct vchiq_mmal_port
*port
)
1394 unsigned int hdr_count
;
1395 struct list_head
*buf_head
;
1401 /* ensure there are enough buffers queued to cover the buffer headers */
1402 if (port
->buffer_cb
!= NULL
) {
1404 list_for_each(buf_head
, &port
->buffers
) {
1407 if (hdr_count
< port
->current_buffer
.num
)
1411 ret
= port_action_port(instance
, port
,
1412 MMAL_MSG_PORT_ACTION_TYPE_ENABLE
);
1416 port
->enabled
= true;
1418 if (port
->buffer_cb
) {
1419 /* send buffer headers to videocore */
1421 list_for_each(buf_head
, &port
->buffers
) {
1422 struct mmal_buffer
*mmalbuf
;
1423 mmalbuf
= list_entry(buf_head
, struct mmal_buffer
,
1425 ret
= buffer_from_host(instance
, port
, mmalbuf
);
1430 if (hdr_count
> port
->current_buffer
.num
)
1435 ret
= port_info_get(instance
, port
);
1441 /* ------------------------------------------------------------------
1443 *------------------------------------------------------------------*/
1445 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance
*instance
,
1446 struct vchiq_mmal_port
*port
)
1450 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1453 ret
= port_info_set(instance
, port
);
1455 goto release_unlock
;
1457 /* read what has actually been set */
1458 ret
= port_info_get(instance
, port
);
1461 mutex_unlock(&instance
->vchiq_mutex
);
1467 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance
*instance
,
1468 struct vchiq_mmal_port
*port
,
1469 u32 parameter
, void *value
, u32 value_size
)
1473 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1476 ret
= port_parameter_set(instance
, port
, parameter
, value
, value_size
);
1478 mutex_unlock(&instance
->vchiq_mutex
);
1483 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance
*instance
,
1484 struct vchiq_mmal_port
*port
,
1485 u32 parameter
, void *value
, u32
*value_size
)
1489 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1492 ret
= port_parameter_get(instance
, port
, parameter
, value
, value_size
);
1494 mutex_unlock(&instance
->vchiq_mutex
);
1501 * enables a port and queues buffers for satisfying callbacks if we
1502 * provide a callback handler
1504 int vchiq_mmal_port_enable(struct vchiq_mmal_instance
*instance
,
1505 struct vchiq_mmal_port
*port
,
1506 vchiq_mmal_buffer_cb buffer_cb
)
1510 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1513 /* already enabled - noop */
1514 if (port
->enabled
) {
1519 port
->buffer_cb
= buffer_cb
;
1521 ret
= port_enable(instance
, port
);
1524 mutex_unlock(&instance
->vchiq_mutex
);
1529 int vchiq_mmal_port_disable(struct vchiq_mmal_instance
*instance
,
1530 struct vchiq_mmal_port
*port
)
1534 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1537 if (!port
->enabled
) {
1538 mutex_unlock(&instance
->vchiq_mutex
);
1542 ret
= port_disable(instance
, port
);
1544 mutex_unlock(&instance
->vchiq_mutex
);
1549 /* ports will be connected in a tunneled manner so data buffers
1550 * are not handled by client.
1552 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance
*instance
,
1553 struct vchiq_mmal_port
*src
,
1554 struct vchiq_mmal_port
*dst
)
1558 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1561 /* disconnect ports if connected */
1562 if (src
->connected
!= NULL
) {
1563 ret
= port_disable(instance
, src
);
1565 pr_err("failed disabling src port(%d)\n", ret
);
1566 goto release_unlock
;
1569 /* do not need to disable the destination port as they
1570 * are connected and it is done automatically
1573 ret
= port_action_handle(instance
, src
,
1574 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT
,
1575 src
->connected
->component
->handle
,
1576 src
->connected
->handle
);
1578 pr_err("failed disconnecting src port\n");
1579 goto release_unlock
;
1581 src
->connected
->enabled
= false;
1582 src
->connected
= NULL
;
1586 /* do not make new connection */
1588 pr_debug("not making new connection\n");
1589 goto release_unlock
;
1592 /* copy src port format to dst */
1593 dst
->format
.encoding
= src
->format
.encoding
;
1594 dst
->es
.video
.width
= src
->es
.video
.width
;
1595 dst
->es
.video
.height
= src
->es
.video
.height
;
1596 dst
->es
.video
.crop
.x
= src
->es
.video
.crop
.x
;
1597 dst
->es
.video
.crop
.y
= src
->es
.video
.crop
.y
;
1598 dst
->es
.video
.crop
.width
= src
->es
.video
.crop
.width
;
1599 dst
->es
.video
.crop
.height
= src
->es
.video
.crop
.height
;
1600 dst
->es
.video
.frame_rate
.num
= src
->es
.video
.frame_rate
.num
;
1601 dst
->es
.video
.frame_rate
.den
= src
->es
.video
.frame_rate
.den
;
1603 /* set new format */
1604 ret
= port_info_set(instance
, dst
);
1606 pr_debug("setting port info failed\n");
1607 goto release_unlock
;
1610 /* read what has actually been set */
1611 ret
= port_info_get(instance
, dst
);
1613 pr_debug("read back port info failed\n");
1614 goto release_unlock
;
1617 /* connect two ports together */
1618 ret
= port_action_handle(instance
, src
,
1619 MMAL_MSG_PORT_ACTION_TYPE_CONNECT
,
1620 dst
->component
->handle
, dst
->handle
);
1622 pr_debug("connecting port %d:%d to %d:%d failed\n",
1623 src
->component
->handle
, src
->handle
,
1624 dst
->component
->handle
, dst
->handle
);
1625 goto release_unlock
;
1627 src
->connected
= dst
;
1631 mutex_unlock(&instance
->vchiq_mutex
);
1636 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance
*instance
,
1637 struct vchiq_mmal_port
*port
,
1638 struct mmal_buffer
*buffer
)
1640 unsigned long flags
= 0;
1642 spin_lock_irqsave(&port
->slock
, flags
);
1643 list_add_tail(&buffer
->list
, &port
->buffers
);
1644 spin_unlock_irqrestore(&port
->slock
, flags
);
1646 /* the port previously underflowed because it was missing a
1647 * mmal_buffer which has just been added, submit that buffer
1648 * to the mmal service.
1650 if (port
->buffer_underflow
) {
1651 port_buffer_from_host(instance
, port
);
1652 port
->buffer_underflow
--;
1658 /* Initialise a mmal component and its ports
1661 int vchiq_mmal_component_init(struct vchiq_mmal_instance
*instance
,
1663 struct vchiq_mmal_component
**component_out
)
1666 int idx
; /* port index */
1667 struct vchiq_mmal_component
*component
;
1669 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1672 if (instance
->component_idx
== VCHIQ_MMAL_MAX_COMPONENTS
) {
1673 ret
= -EINVAL
; /* todo is this correct error? */
1677 component
= &instance
->component
[instance
->component_idx
];
1679 ret
= create_component(instance
, component
, name
);
1683 /* ports info needs gathering */
1684 component
->control
.type
= MMAL_PORT_TYPE_CONTROL
;
1685 component
->control
.index
= 0;
1686 component
->control
.component
= component
;
1687 spin_lock_init(&component
->control
.slock
);
1688 INIT_LIST_HEAD(&component
->control
.buffers
);
1689 ret
= port_info_get(instance
, &component
->control
);
1691 goto release_component
;
1693 for (idx
= 0; idx
< component
->inputs
; idx
++) {
1694 component
->input
[idx
].type
= MMAL_PORT_TYPE_INPUT
;
1695 component
->input
[idx
].index
= idx
;
1696 component
->input
[idx
].component
= component
;
1697 spin_lock_init(&component
->input
[idx
].slock
);
1698 INIT_LIST_HEAD(&component
->input
[idx
].buffers
);
1699 ret
= port_info_get(instance
, &component
->input
[idx
]);
1701 goto release_component
;
1704 for (idx
= 0; idx
< component
->outputs
; idx
++) {
1705 component
->output
[idx
].type
= MMAL_PORT_TYPE_OUTPUT
;
1706 component
->output
[idx
].index
= idx
;
1707 component
->output
[idx
].component
= component
;
1708 spin_lock_init(&component
->output
[idx
].slock
);
1709 INIT_LIST_HEAD(&component
->output
[idx
].buffers
);
1710 ret
= port_info_get(instance
, &component
->output
[idx
]);
1712 goto release_component
;
1715 for (idx
= 0; idx
< component
->clocks
; idx
++) {
1716 component
->clock
[idx
].type
= MMAL_PORT_TYPE_CLOCK
;
1717 component
->clock
[idx
].index
= idx
;
1718 component
->clock
[idx
].component
= component
;
1719 spin_lock_init(&component
->clock
[idx
].slock
);
1720 INIT_LIST_HEAD(&component
->clock
[idx
].buffers
);
1721 ret
= port_info_get(instance
, &component
->clock
[idx
]);
1723 goto release_component
;
1726 instance
->component_idx
++;
1728 *component_out
= component
;
1730 mutex_unlock(&instance
->vchiq_mutex
);
1735 destroy_component(instance
, component
);
1737 mutex_unlock(&instance
->vchiq_mutex
);
1743 * cause a mmal component to be destroyed
1745 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance
*instance
,
1746 struct vchiq_mmal_component
*component
)
1750 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1753 if (component
->enabled
)
1754 ret
= disable_component(instance
, component
);
1756 ret
= destroy_component(instance
, component
);
1758 mutex_unlock(&instance
->vchiq_mutex
);
1764 * cause a mmal component to be enabled
1766 int vchiq_mmal_component_enable(struct vchiq_mmal_instance
*instance
,
1767 struct vchiq_mmal_component
*component
)
1771 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1774 if (component
->enabled
) {
1775 mutex_unlock(&instance
->vchiq_mutex
);
1779 ret
= enable_component(instance
, component
);
1781 component
->enabled
= true;
1783 mutex_unlock(&instance
->vchiq_mutex
);
1789 * cause a mmal component to be enabled
1791 int vchiq_mmal_component_disable(struct vchiq_mmal_instance
*instance
,
1792 struct vchiq_mmal_component
*component
)
1796 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1799 if (!component
->enabled
) {
1800 mutex_unlock(&instance
->vchiq_mutex
);
1804 ret
= disable_component(instance
, component
);
1806 component
->enabled
= false;
1808 mutex_unlock(&instance
->vchiq_mutex
);
1813 int vchiq_mmal_version(struct vchiq_mmal_instance
*instance
,
1814 u32
*major_out
, u32
*minor_out
)
1818 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1821 ret
= get_version(instance
, major_out
, minor_out
);
1823 mutex_unlock(&instance
->vchiq_mutex
);
1828 int vchiq_mmal_finalise(struct vchiq_mmal_instance
*instance
)
1832 if (instance
== NULL
)
1835 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1838 vchi_service_use(instance
->handle
);
1840 status
= vchi_service_close(instance
->handle
);
1842 pr_err("mmal-vchiq: VCHIQ close failed");
1844 mutex_unlock(&instance
->vchiq_mutex
);
1846 vfree(instance
->bulk_scratch
);
1853 int vchiq_mmal_init(struct vchiq_mmal_instance
**out_instance
)
1856 struct vchiq_mmal_instance
*instance
;
1857 static VCHI_CONNECTION_T
*vchi_connection
;
1858 static VCHI_INSTANCE_T vchi_instance
;
1859 SERVICE_CREATION_T params
= {
1860 VCHI_VERSION_EX(VC_MMAL_VER
, VC_MMAL_MIN_VER
),
1861 VC_MMAL_SERVER_NAME
,
1863 0, /* rx fifo size (unused) */
1864 0, /* tx fifo size (unused) */
1866 NULL
, /* service callback parameter */
1867 1, /* unaligned bulk receives */
1868 1, /* unaligned bulk transmits */
1869 0 /* want crc check on bulk transfers */
1872 /* compile time checks to ensure structure size as they are
1873 * directly (de)serialised from memory.
1876 /* ensure the header structure has packed to the correct size */
1877 BUILD_BUG_ON(sizeof(struct mmal_msg_header
) != 24);
1879 /* ensure message structure does not exceed maximum length */
1880 BUILD_BUG_ON(sizeof(struct mmal_msg
) > MMAL_MSG_MAX_SIZE
);
1882 /* mmal port struct is correct size */
1883 BUILD_BUG_ON(sizeof(struct mmal_port
) != 64);
1885 /* create a vchi instance */
1886 status
= vchi_initialise(&vchi_instance
);
1888 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1893 status
= vchi_connect(NULL
, 0, vchi_instance
);
1895 pr_err("Failed to connect VCHI instance (status=%d)\n", status
);
1899 instance
= kmalloc(sizeof(*instance
), GFP_KERNEL
);
1900 memset(instance
, 0, sizeof(*instance
));
1902 mutex_init(&instance
->vchiq_mutex
);
1903 mutex_init(&instance
->bulk_mutex
);
1905 instance
->bulk_scratch
= vmalloc(PAGE_SIZE
);
1907 params
.callback_param
= instance
;
1909 status
= vchi_service_open(vchi_instance
, ¶ms
, &instance
->handle
);
1911 pr_err("Failed to open VCHI service connection (status=%d)\n",
1913 goto err_close_services
;
1916 vchi_service_release(instance
->handle
);
1918 *out_instance
= instance
;
1924 vchi_service_close(instance
->handle
);
1925 vfree(instance
->bulk_scratch
);