// SPDX-License-Identifier: GPL-2.0
/*
 * Broadcom BM2835 V4L2 driver
 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
 *
 * Authors: Vincent Sanders @ Collabora
 *	Dave Stevenson @ Broadcom
 *		(now dave.stevenson@raspberrypi.org)
 *	Simon Mellor @ Broadcom
 *	Luke Diamand @ Broadcom
 *
 * V4L2 driver MMAL vchiq interface code
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <linux/raspberrypi/vchiq.h>
27 #include <media/videobuf2-vmalloc.h>
29 #include "mmal-common.h"
30 #include "mmal-vchiq.h"
/*
 * maximum number of components supported.
 * This matches the maximum permitted by default on the VPU
 */
37 #define VCHIQ_MMAL_MAX_COMPONENTS 64
/*
 * Timeout for synchronous msg responses in seconds.
 * Helpful to increase this if stopping in the VPU debugger.
 */
43 #define SYNC_MSG_TIMEOUT 3
45 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for MMAL message types, indexed by
 * enum mmal_msg_type; used only by the DBG_DUMP_MSG debug macro.
 * NOTE(review): entries reconstructed from the upstream kernel source —
 * the extraction dropped most of them; confirm against mmal-msg.h.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LCD_HANDLE",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
/* Human-readable names for MMAL port actions, indexed by
 * enum mmal_msg_port_action_type; used for debug logging only.
 * NOTE(review): entries reconstructed from the upstream kernel source.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
/* Debug helper: dump an MMAL message header (and, with FULL_MSG_DUMP,
 * a full hex dump of header + payload). Compiles away to nothing in
 * non-debug builds. The do/while(0) wrapper makes the macro safe as a
 * single statement.
 */
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#elif defined(DEBUG)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
116 struct vchiq_mmal_instance
;
118 /* normal message context */
119 struct mmal_msg_context
{
120 struct vchiq_mmal_instance
*instance
;
122 /* Index in the context_map idr so that we can find the
123 * mmal_msg_context again when servicing the VCHI reply.
129 /* work struct for buffer_cb callback */
130 struct work_struct work
;
131 /* work struct for deferred callback */
132 struct work_struct buffer_to_host_work
;
134 struct vchiq_mmal_instance
*instance
;
136 struct vchiq_mmal_port
*port
;
137 /* actual buffer used to store bulk reply */
138 struct mmal_buffer
*buffer
;
139 /* amount of buffer used */
140 unsigned long buffer_used
;
141 /* MMAL buffer flags */
143 /* Presentation and Decode timestamps */
147 int status
; /* context status */
149 } bulk
; /* bulk data */
152 /* message handle to release */
153 struct vchiq_header
*msg_handle
;
154 /* pointer to received message */
155 struct mmal_msg
*msg
;
156 /* received message length */
158 /* completion upon reply */
159 struct completion cmplt
;
160 } sync
; /* synchronous response */
165 struct vchiq_mmal_instance
{
166 unsigned int service_handle
;
168 /* ensure serialised access to service */
169 struct mutex vchiq_mutex
;
171 /* vmalloc page to receive scratch bulk xfers into */
174 struct idr context_map
;
175 /* protect accesses to context_map */
176 struct mutex context_map_lock
;
178 struct vchiq_mmal_component component
[VCHIQ_MMAL_MAX_COMPONENTS
];
180 /* ordered workqueue to process all bulk operations */
181 struct workqueue_struct
*bulk_wq
;
183 /* handle for a vchiq instance */
184 struct vchiq_instance
*vchiq_instance
;
187 static struct mmal_msg_context
*
188 get_msg_context(struct vchiq_mmal_instance
*instance
)
190 struct mmal_msg_context
*msg_context
;
193 /* todo: should this be allocated from a pool to avoid kzalloc */
194 msg_context
= kzalloc(sizeof(*msg_context
), GFP_KERNEL
);
197 return ERR_PTR(-ENOMEM
);
199 /* Create an ID that will be passed along with our message so
200 * that when we service the VCHI reply, we can look up what
201 * message is being replied to.
203 mutex_lock(&instance
->context_map_lock
);
204 handle
= idr_alloc(&instance
->context_map
, msg_context
,
206 mutex_unlock(&instance
->context_map_lock
);
210 return ERR_PTR(handle
);
213 msg_context
->instance
= instance
;
214 msg_context
->handle
= handle
;
219 static struct mmal_msg_context
*
220 lookup_msg_context(struct vchiq_mmal_instance
*instance
, int handle
)
222 return idr_find(&instance
->context_map
, handle
);
226 release_msg_context(struct mmal_msg_context
*msg_context
)
228 struct vchiq_mmal_instance
*instance
= msg_context
->instance
;
230 mutex_lock(&instance
->context_map_lock
);
231 idr_remove(&instance
->context_map
, msg_context
->handle
);
232 mutex_unlock(&instance
->context_map_lock
);
236 /* deals with receipt of event to host message */
237 static void event_to_host_cb(struct vchiq_mmal_instance
*instance
,
238 struct mmal_msg
*msg
, u32 msg_len
)
240 pr_debug("unhandled event\n");
241 pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
242 msg
->u
.event_to_host
.client_component
,
243 msg
->u
.event_to_host
.port_type
,
244 msg
->u
.event_to_host
.port_num
,
245 msg
->u
.event_to_host
.cmd
, msg
->u
.event_to_host
.length
);
248 /* workqueue scheduled callback
250 * we do this because it is important we do not call any other vchiq
251 * sync calls from witin the message delivery thread
253 static void buffer_work_cb(struct work_struct
*work
)
255 struct mmal_msg_context
*msg_context
=
256 container_of(work
, struct mmal_msg_context
, u
.bulk
.work
);
257 struct mmal_buffer
*buffer
= msg_context
->u
.bulk
.buffer
;
260 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
261 __func__
, msg_context
);
265 buffer
->length
= msg_context
->u
.bulk
.buffer_used
;
266 buffer
->mmal_flags
= msg_context
->u
.bulk
.mmal_flags
;
267 buffer
->dts
= msg_context
->u
.bulk
.dts
;
268 buffer
->pts
= msg_context
->u
.bulk
.pts
;
270 atomic_dec(&msg_context
->u
.bulk
.port
->buffers_with_vpu
);
272 msg_context
->u
.bulk
.port
->buffer_cb(msg_context
->u
.bulk
.instance
,
273 msg_context
->u
.bulk
.port
,
274 msg_context
->u
.bulk
.status
,
275 msg_context
->u
.bulk
.buffer
);
278 /* workqueue scheduled callback to handle receiving buffers
280 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
281 * If we block in the service_callback context then we can't process the
282 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
283 * vchiq_bulk_receive() call to complete.
285 static void buffer_to_host_work_cb(struct work_struct
*work
)
287 struct mmal_msg_context
*msg_context
=
288 container_of(work
, struct mmal_msg_context
,
289 u
.bulk
.buffer_to_host_work
);
290 struct vchiq_mmal_instance
*instance
= msg_context
->instance
;
291 unsigned long len
= msg_context
->u
.bulk
.buffer_used
;
295 /* Dummy receive to ensure the buffers remain in order */
297 /* queue the bulk submission */
298 vchiq_use_service(instance
->service_handle
);
299 ret
= vchiq_bulk_receive(instance
->service_handle
,
300 msg_context
->u
.bulk
.buffer
->buffer
,
301 /* Actual receive needs to be a multiple
306 VCHIQ_BULK_MODE_CALLBACK
);
308 vchiq_release_service(instance
->service_handle
);
311 pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
312 __func__
, msg_context
, ret
);
315 /* enqueue a bulk receive for a given message context */
316 static int bulk_receive(struct vchiq_mmal_instance
*instance
,
317 struct mmal_msg
*msg
,
318 struct mmal_msg_context
*msg_context
)
320 unsigned long rd_len
;
322 rd_len
= msg
->u
.buffer_from_host
.buffer_header
.length
;
324 if (!msg_context
->u
.bulk
.buffer
) {
325 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
327 /* todo: this is a serious error, we should never have
328 * committed a buffer_to_host operation to the mmal
329 * port without the buffer to back it up (underflow
330 * handling) and there is no obvious way to deal with
331 * this - how is the mmal servie going to react when
332 * we fail to do the xfer and reschedule a buffer when
333 * it arrives? perhaps a starved flag to indicate a
334 * waiting bulk receive?
340 /* ensure we do not overrun the available buffer */
341 if (rd_len
> msg_context
->u
.bulk
.buffer
->buffer_size
) {
342 rd_len
= msg_context
->u
.bulk
.buffer
->buffer_size
;
343 pr_warn("short read as not enough receive buffer space\n");
344 /* todo: is this the correct response, what happens to
345 * the rest of the message data?
350 msg_context
->u
.bulk
.buffer_used
= rd_len
;
351 msg_context
->u
.bulk
.dts
= msg
->u
.buffer_from_host
.buffer_header
.dts
;
352 msg_context
->u
.bulk
.pts
= msg
->u
.buffer_from_host
.buffer_header
.pts
;
354 queue_work(msg_context
->instance
->bulk_wq
,
355 &msg_context
->u
.bulk
.buffer_to_host_work
);
360 /* data in message, memcpy from packet into output buffer */
361 static int inline_receive(struct vchiq_mmal_instance
*instance
,
362 struct mmal_msg
*msg
,
363 struct mmal_msg_context
*msg_context
)
365 memcpy(msg_context
->u
.bulk
.buffer
->buffer
,
366 msg
->u
.buffer_from_host
.short_data
,
367 msg
->u
.buffer_from_host
.payload_in_message
);
369 msg_context
->u
.bulk
.buffer_used
=
370 msg
->u
.buffer_from_host
.payload_in_message
;
375 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
377 buffer_from_host(struct vchiq_mmal_instance
*instance
,
378 struct vchiq_mmal_port
*port
, struct mmal_buffer
*buf
)
380 struct mmal_msg_context
*msg_context
;
387 pr_debug("instance:%u buffer:%p\n", instance
->service_handle
, buf
);
390 if (!buf
->msg_context
) {
391 pr_err("%s: msg_context not allocated, buf %p\n", __func__
,
395 msg_context
= buf
->msg_context
;
397 /* store bulk message context for when data arrives */
398 msg_context
->u
.bulk
.instance
= instance
;
399 msg_context
->u
.bulk
.port
= port
;
400 msg_context
->u
.bulk
.buffer
= buf
;
401 msg_context
->u
.bulk
.buffer_used
= 0;
403 /* initialise work structure ready to schedule callback */
404 INIT_WORK(&msg_context
->u
.bulk
.work
, buffer_work_cb
);
405 INIT_WORK(&msg_context
->u
.bulk
.buffer_to_host_work
,
406 buffer_to_host_work_cb
);
408 atomic_inc(&port
->buffers_with_vpu
);
410 /* prep the buffer from host message */
411 memset(&m
, 0xbc, sizeof(m
)); /* just to make debug clearer */
413 m
.h
.type
= MMAL_MSG_TYPE_BUFFER_FROM_HOST
;
414 m
.h
.magic
= MMAL_MAGIC
;
415 m
.h
.context
= msg_context
->handle
;
418 /* drvbuf is our private data passed back */
419 m
.u
.buffer_from_host
.drvbuf
.magic
= MMAL_MAGIC
;
420 m
.u
.buffer_from_host
.drvbuf
.component_handle
= port
->component
->handle
;
421 m
.u
.buffer_from_host
.drvbuf
.port_handle
= port
->handle
;
422 m
.u
.buffer_from_host
.drvbuf
.client_context
= msg_context
->handle
;
425 m
.u
.buffer_from_host
.buffer_header
.cmd
= 0;
426 m
.u
.buffer_from_host
.buffer_header
.data
=
427 (u32
)(unsigned long)buf
->buffer
;
428 m
.u
.buffer_from_host
.buffer_header
.alloc_size
= buf
->buffer_size
;
429 m
.u
.buffer_from_host
.buffer_header
.length
= 0; /* nothing used yet */
430 m
.u
.buffer_from_host
.buffer_header
.offset
= 0; /* no offset */
431 m
.u
.buffer_from_host
.buffer_header
.flags
= 0; /* no flags */
432 m
.u
.buffer_from_host
.buffer_header
.pts
= MMAL_TIME_UNKNOWN
;
433 m
.u
.buffer_from_host
.buffer_header
.dts
= MMAL_TIME_UNKNOWN
;
435 /* clear buffer type sepecific data */
436 memset(&m
.u
.buffer_from_host
.buffer_header_type_specific
, 0,
437 sizeof(m
.u
.buffer_from_host
.buffer_header_type_specific
));
439 /* no payload in message */
440 m
.u
.buffer_from_host
.payload_in_message
= 0;
442 vchiq_use_service(instance
->service_handle
);
444 ret
= vchiq_queue_kernel_message(instance
->service_handle
, &m
,
445 sizeof(struct mmal_msg_header
) +
446 sizeof(m
.u
.buffer_from_host
));
448 atomic_dec(&port
->buffers_with_vpu
);
450 vchiq_release_service(instance
->service_handle
);
455 /* deals with receipt of buffer to host message */
456 static void buffer_to_host_cb(struct vchiq_mmal_instance
*instance
,
457 struct mmal_msg
*msg
, u32 msg_len
)
459 struct mmal_msg_context
*msg_context
;
462 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
463 __func__
, instance
, msg
, msg_len
);
465 if (msg
->u
.buffer_from_host
.drvbuf
.magic
== MMAL_MAGIC
) {
466 handle
= msg
->u
.buffer_from_host
.drvbuf
.client_context
;
467 msg_context
= lookup_msg_context(instance
, handle
);
470 pr_err("drvbuf.client_context(%u) is invalid\n",
475 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
479 msg_context
->u
.bulk
.mmal_flags
=
480 msg
->u
.buffer_from_host
.buffer_header
.flags
;
482 if (msg
->h
.status
!= MMAL_MSG_STATUS_SUCCESS
) {
483 /* message reception had an error */
484 pr_warn("error %d in reply\n", msg
->h
.status
);
486 msg_context
->u
.bulk
.status
= msg
->h
.status
;
488 } else if (msg
->u
.buffer_from_host
.buffer_header
.length
== 0) {
490 if (msg
->u
.buffer_from_host
.buffer_header
.flags
&
491 MMAL_BUFFER_HEADER_FLAG_EOS
) {
492 msg_context
->u
.bulk
.status
=
493 bulk_receive(instance
, msg
, msg_context
);
494 if (msg_context
->u
.bulk
.status
== 0)
495 return; /* successful bulk submission, bulk
496 * completion will trigger callback
499 /* do callback with empty buffer - not EOS though */
500 msg_context
->u
.bulk
.status
= 0;
501 msg_context
->u
.bulk
.buffer_used
= 0;
503 } else if (msg
->u
.buffer_from_host
.payload_in_message
== 0) {
504 /* data is not in message, queue a bulk receive */
505 msg_context
->u
.bulk
.status
=
506 bulk_receive(instance
, msg
, msg_context
);
507 if (msg_context
->u
.bulk
.status
== 0)
508 return; /* successful bulk submission, bulk
509 * completion will trigger callback
512 /* failed to submit buffer, this will end badly */
513 pr_err("error %d on bulk submission\n",
514 msg_context
->u
.bulk
.status
);
516 } else if (msg
->u
.buffer_from_host
.payload_in_message
<=
517 MMAL_VC_SHORT_DATA
) {
518 /* data payload within message */
519 msg_context
->u
.bulk
.status
= inline_receive(instance
, msg
,
522 pr_err("message with invalid short payload\n");
525 msg_context
->u
.bulk
.status
= -EINVAL
;
526 msg_context
->u
.bulk
.buffer_used
=
527 msg
->u
.buffer_from_host
.payload_in_message
;
530 /* schedule the port callback */
531 schedule_work(&msg_context
->u
.bulk
.work
);
534 static void bulk_receive_cb(struct vchiq_mmal_instance
*instance
,
535 struct mmal_msg_context
*msg_context
)
537 msg_context
->u
.bulk
.status
= 0;
539 /* schedule the port callback */
540 schedule_work(&msg_context
->u
.bulk
.work
);
543 static void bulk_abort_cb(struct vchiq_mmal_instance
*instance
,
544 struct mmal_msg_context
*msg_context
)
546 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__
, msg_context
);
548 msg_context
->u
.bulk
.status
= -EINTR
;
550 schedule_work(&msg_context
->u
.bulk
.work
);
553 /* incoming event service callback */
554 static enum vchiq_status
service_callback(enum vchiq_reason reason
,
555 struct vchiq_header
*header
,
556 unsigned int handle
, void *bulk_ctx
)
558 struct vchiq_mmal_instance
*instance
= vchiq_get_service_userdata(handle
);
560 struct mmal_msg
*msg
;
561 struct mmal_msg_context
*msg_context
;
564 pr_err("Message callback passed NULL instance\n");
565 return VCHIQ_SUCCESS
;
569 case VCHIQ_MESSAGE_AVAILABLE
:
570 msg
= (void *)header
->data
;
571 msg_len
= header
->size
;
573 DBG_DUMP_MSG(msg
, msg_len
, "<<< reply message");
575 /* handling is different for buffer messages */
576 switch (msg
->h
.type
) {
577 case MMAL_MSG_TYPE_BUFFER_FROM_HOST
:
578 vchiq_release_message(handle
, header
);
581 case MMAL_MSG_TYPE_EVENT_TO_HOST
:
582 event_to_host_cb(instance
, msg
, msg_len
);
583 vchiq_release_message(handle
, header
);
587 case MMAL_MSG_TYPE_BUFFER_TO_HOST
:
588 buffer_to_host_cb(instance
, msg
, msg_len
);
589 vchiq_release_message(handle
, header
);
593 /* messages dependent on header context to complete */
594 if (!msg
->h
.context
) {
595 pr_err("received message context was null!\n");
596 vchiq_release_message(handle
, header
);
600 msg_context
= lookup_msg_context(instance
,
603 pr_err("received invalid message context %u!\n",
605 vchiq_release_message(handle
, header
);
609 /* fill in context values */
610 msg_context
->u
.sync
.msg_handle
= header
;
611 msg_context
->u
.sync
.msg
= msg
;
612 msg_context
->u
.sync
.msg_len
= msg_len
;
614 /* todo: should this check (completion_done()
615 * == 1) for no one waiting? or do we need a
616 * flag to tell us the completion has been
617 * interrupted so we can free the message and
618 * its context. This probably also solves the
619 * message arriving after interruption todo
623 /* complete message so caller knows it happened */
624 complete(&msg_context
->u
.sync
.cmplt
);
630 case VCHIQ_BULK_RECEIVE_DONE
:
631 bulk_receive_cb(instance
, bulk_ctx
);
634 case VCHIQ_BULK_RECEIVE_ABORTED
:
635 bulk_abort_cb(instance
, bulk_ctx
);
638 case VCHIQ_SERVICE_CLOSED
:
639 /* TODO: consider if this requires action if received when
640 * driver is not explicitly closing the service
645 pr_err("Received unhandled message reason %d\n", reason
);
649 return VCHIQ_SUCCESS
;
652 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance
*instance
,
653 struct mmal_msg
*msg
,
654 unsigned int payload_len
,
655 struct mmal_msg
**msg_out
,
656 struct vchiq_header
**msg_handle
)
658 struct mmal_msg_context
*msg_context
;
660 unsigned long timeout
;
662 /* payload size must not cause message to exceed max size */
664 (MMAL_MSG_MAX_SIZE
- sizeof(struct mmal_msg_header
))) {
665 pr_err("payload length %d exceeds max:%d\n", payload_len
,
666 (int)(MMAL_MSG_MAX_SIZE
-
667 sizeof(struct mmal_msg_header
)));
671 msg_context
= get_msg_context(instance
);
672 if (IS_ERR(msg_context
))
673 return PTR_ERR(msg_context
);
675 init_completion(&msg_context
->u
.sync
.cmplt
);
677 msg
->h
.magic
= MMAL_MAGIC
;
678 msg
->h
.context
= msg_context
->handle
;
681 DBG_DUMP_MSG(msg
, (sizeof(struct mmal_msg_header
) + payload_len
),
684 vchiq_use_service(instance
->service_handle
);
686 ret
= vchiq_queue_kernel_message(instance
->service_handle
, msg
,
687 sizeof(struct mmal_msg_header
) +
690 vchiq_release_service(instance
->service_handle
);
693 pr_err("error %d queuing message\n", ret
);
694 release_msg_context(msg_context
);
698 timeout
= wait_for_completion_timeout(&msg_context
->u
.sync
.cmplt
,
699 SYNC_MSG_TIMEOUT
* HZ
);
701 pr_err("timed out waiting for sync completion\n");
703 /* todo: what happens if the message arrives after aborting */
704 release_msg_context(msg_context
);
708 *msg_out
= msg_context
->u
.sync
.msg
;
709 *msg_handle
= msg_context
->u
.sync
.msg_handle
;
710 release_msg_context(msg_context
);
715 static void dump_port_info(struct vchiq_mmal_port
*port
)
717 pr_debug("port handle:0x%x enabled:%d\n", port
->handle
, port
->enabled
);
719 pr_debug("buffer minimum num:%d size:%d align:%d\n",
720 port
->minimum_buffer
.num
,
721 port
->minimum_buffer
.size
, port
->minimum_buffer
.alignment
);
723 pr_debug("buffer recommended num:%d size:%d align:%d\n",
724 port
->recommended_buffer
.num
,
725 port
->recommended_buffer
.size
,
726 port
->recommended_buffer
.alignment
);
728 pr_debug("buffer current values num:%d size:%d align:%d\n",
729 port
->current_buffer
.num
,
730 port
->current_buffer
.size
, port
->current_buffer
.alignment
);
732 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
734 port
->format
.encoding
, port
->format
.encoding_variant
);
736 pr_debug(" bitrate:%d flags:0x%x\n",
737 port
->format
.bitrate
, port
->format
.flags
);
739 if (port
->format
.type
== MMAL_ES_TYPE_VIDEO
) {
741 ("es video format: width:%d height:%d colourspace:0x%x\n",
742 port
->es
.video
.width
, port
->es
.video
.height
,
743 port
->es
.video
.color_space
);
745 pr_debug(" : crop xywh %d,%d,%d,%d\n",
746 port
->es
.video
.crop
.x
,
747 port
->es
.video
.crop
.y
,
748 port
->es
.video
.crop
.width
, port
->es
.video
.crop
.height
);
749 pr_debug(" : framerate %d/%d aspect %d/%d\n",
750 port
->es
.video
.frame_rate
.num
,
751 port
->es
.video
.frame_rate
.den
,
752 port
->es
.video
.par
.num
, port
->es
.video
.par
.den
);
756 static void port_to_mmal_msg(struct vchiq_mmal_port
*port
, struct mmal_port
*p
)
758 /* todo do readonly fields need setting at all? */
759 p
->type
= port
->type
;
760 p
->index
= port
->index
;
762 p
->is_enabled
= port
->enabled
;
763 p
->buffer_num_min
= port
->minimum_buffer
.num
;
764 p
->buffer_size_min
= port
->minimum_buffer
.size
;
765 p
->buffer_alignment_min
= port
->minimum_buffer
.alignment
;
766 p
->buffer_num_recommended
= port
->recommended_buffer
.num
;
767 p
->buffer_size_recommended
= port
->recommended_buffer
.size
;
769 /* only three writable fields in a port */
770 p
->buffer_num
= port
->current_buffer
.num
;
771 p
->buffer_size
= port
->current_buffer
.size
;
772 p
->userdata
= (u32
)(unsigned long)port
;
775 static int port_info_set(struct vchiq_mmal_instance
*instance
,
776 struct vchiq_mmal_port
*port
)
780 struct mmal_msg
*rmsg
;
781 struct vchiq_header
*rmsg_handle
;
783 pr_debug("setting port info port %p\n", port
);
786 dump_port_info(port
);
788 m
.h
.type
= MMAL_MSG_TYPE_PORT_INFO_SET
;
790 m
.u
.port_info_set
.component_handle
= port
->component
->handle
;
791 m
.u
.port_info_set
.port_type
= port
->type
;
792 m
.u
.port_info_set
.port_index
= port
->index
;
794 port_to_mmal_msg(port
, &m
.u
.port_info_set
.port
);
796 /* elementary stream format setup */
797 m
.u
.port_info_set
.format
.type
= port
->format
.type
;
798 m
.u
.port_info_set
.format
.encoding
= port
->format
.encoding
;
799 m
.u
.port_info_set
.format
.encoding_variant
=
800 port
->format
.encoding_variant
;
801 m
.u
.port_info_set
.format
.bitrate
= port
->format
.bitrate
;
802 m
.u
.port_info_set
.format
.flags
= port
->format
.flags
;
804 memcpy(&m
.u
.port_info_set
.es
, &port
->es
,
805 sizeof(union mmal_es_specific_format
));
807 m
.u
.port_info_set
.format
.extradata_size
= port
->format
.extradata_size
;
808 memcpy(&m
.u
.port_info_set
.extradata
, port
->format
.extradata
,
809 port
->format
.extradata_size
);
811 ret
= send_synchronous_mmal_msg(instance
, &m
,
812 sizeof(m
.u
.port_info_set
),
813 &rmsg
, &rmsg_handle
);
817 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_INFO_SET
) {
818 /* got an unexpected message type in reply */
823 /* return operation status */
824 ret
= -rmsg
->u
.port_info_get_reply
.status
;
826 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__
, ret
,
827 port
->component
->handle
, port
->handle
);
830 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
835 /* use port info get message to retrieve port information */
836 static int port_info_get(struct vchiq_mmal_instance
*instance
,
837 struct vchiq_mmal_port
*port
)
841 struct mmal_msg
*rmsg
;
842 struct vchiq_header
*rmsg_handle
;
845 m
.h
.type
= MMAL_MSG_TYPE_PORT_INFO_GET
;
846 m
.u
.port_info_get
.component_handle
= port
->component
->handle
;
847 m
.u
.port_info_get
.port_type
= port
->type
;
848 m
.u
.port_info_get
.index
= port
->index
;
850 ret
= send_synchronous_mmal_msg(instance
, &m
,
851 sizeof(m
.u
.port_info_get
),
852 &rmsg
, &rmsg_handle
);
856 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_INFO_GET
) {
857 /* got an unexpected message type in reply */
862 /* return operation status */
863 ret
= -rmsg
->u
.port_info_get_reply
.status
;
864 if (ret
!= MMAL_MSG_STATUS_SUCCESS
)
867 if (rmsg
->u
.port_info_get_reply
.port
.is_enabled
== 0)
872 /* copy the values out of the message */
873 port
->handle
= rmsg
->u
.port_info_get_reply
.port_handle
;
875 /* port type and index cached to use on port info set because
876 * it does not use a port handle
878 port
->type
= rmsg
->u
.port_info_get_reply
.port_type
;
879 port
->index
= rmsg
->u
.port_info_get_reply
.port_index
;
881 port
->minimum_buffer
.num
=
882 rmsg
->u
.port_info_get_reply
.port
.buffer_num_min
;
883 port
->minimum_buffer
.size
=
884 rmsg
->u
.port_info_get_reply
.port
.buffer_size_min
;
885 port
->minimum_buffer
.alignment
=
886 rmsg
->u
.port_info_get_reply
.port
.buffer_alignment_min
;
888 port
->recommended_buffer
.alignment
=
889 rmsg
->u
.port_info_get_reply
.port
.buffer_alignment_min
;
890 port
->recommended_buffer
.num
=
891 rmsg
->u
.port_info_get_reply
.port
.buffer_num_recommended
;
893 port
->current_buffer
.num
= rmsg
->u
.port_info_get_reply
.port
.buffer_num
;
894 port
->current_buffer
.size
=
895 rmsg
->u
.port_info_get_reply
.port
.buffer_size
;
898 port
->format
.type
= rmsg
->u
.port_info_get_reply
.format
.type
;
899 port
->format
.encoding
= rmsg
->u
.port_info_get_reply
.format
.encoding
;
900 port
->format
.encoding_variant
=
901 rmsg
->u
.port_info_get_reply
.format
.encoding_variant
;
902 port
->format
.bitrate
= rmsg
->u
.port_info_get_reply
.format
.bitrate
;
903 port
->format
.flags
= rmsg
->u
.port_info_get_reply
.format
.flags
;
905 /* elementary stream format */
907 &rmsg
->u
.port_info_get_reply
.es
,
908 sizeof(union mmal_es_specific_format
));
909 port
->format
.es
= &port
->es
;
911 port
->format
.extradata_size
=
912 rmsg
->u
.port_info_get_reply
.format
.extradata_size
;
913 memcpy(port
->format
.extradata
,
914 rmsg
->u
.port_info_get_reply
.extradata
,
915 port
->format
.extradata_size
);
917 pr_debug("received port info\n");
918 dump_port_info(port
);
922 pr_debug("%s:result:%d component:0x%x port:%d\n",
923 __func__
, ret
, port
->component
->handle
, port
->handle
);
925 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
930 /* create comonent on vc */
931 static int create_component(struct vchiq_mmal_instance
*instance
,
932 struct vchiq_mmal_component
*component
,
937 struct mmal_msg
*rmsg
;
938 struct vchiq_header
*rmsg_handle
;
940 /* build component create message */
941 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_CREATE
;
942 m
.u
.component_create
.client_component
= component
->client_component
;
943 strncpy(m
.u
.component_create
.name
, name
,
944 sizeof(m
.u
.component_create
.name
));
946 ret
= send_synchronous_mmal_msg(instance
, &m
,
947 sizeof(m
.u
.component_create
),
948 &rmsg
, &rmsg_handle
);
952 if (rmsg
->h
.type
!= m
.h
.type
) {
953 /* got an unexpected message type in reply */
958 ret
= -rmsg
->u
.component_create_reply
.status
;
959 if (ret
!= MMAL_MSG_STATUS_SUCCESS
)
962 /* a valid component response received */
963 component
->handle
= rmsg
->u
.component_create_reply
.component_handle
;
964 component
->inputs
= rmsg
->u
.component_create_reply
.input_num
;
965 component
->outputs
= rmsg
->u
.component_create_reply
.output_num
;
966 component
->clocks
= rmsg
->u
.component_create_reply
.clock_num
;
968 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
970 component
->inputs
, component
->outputs
, component
->clocks
);
973 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
978 /* destroys a component on vc */
979 static int destroy_component(struct vchiq_mmal_instance
*instance
,
980 struct vchiq_mmal_component
*component
)
984 struct mmal_msg
*rmsg
;
985 struct vchiq_header
*rmsg_handle
;
987 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_DESTROY
;
988 m
.u
.component_destroy
.component_handle
= component
->handle
;
990 ret
= send_synchronous_mmal_msg(instance
, &m
,
991 sizeof(m
.u
.component_destroy
),
992 &rmsg
, &rmsg_handle
);
996 if (rmsg
->h
.type
!= m
.h
.type
) {
997 /* got an unexpected message type in reply */
1002 ret
= -rmsg
->u
.component_destroy_reply
.status
;
1006 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1011 /* enable a component on vc */
1012 static int enable_component(struct vchiq_mmal_instance
*instance
,
1013 struct vchiq_mmal_component
*component
)
1017 struct mmal_msg
*rmsg
;
1018 struct vchiq_header
*rmsg_handle
;
1020 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_ENABLE
;
1021 m
.u
.component_enable
.component_handle
= component
->handle
;
1023 ret
= send_synchronous_mmal_msg(instance
, &m
,
1024 sizeof(m
.u
.component_enable
),
1025 &rmsg
, &rmsg_handle
);
1029 if (rmsg
->h
.type
!= m
.h
.type
) {
1030 /* got an unexpected message type in reply */
1035 ret
= -rmsg
->u
.component_enable_reply
.status
;
1038 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1043 /* disable a component on vc */
1044 static int disable_component(struct vchiq_mmal_instance
*instance
,
1045 struct vchiq_mmal_component
*component
)
1049 struct mmal_msg
*rmsg
;
1050 struct vchiq_header
*rmsg_handle
;
1052 m
.h
.type
= MMAL_MSG_TYPE_COMPONENT_DISABLE
;
1053 m
.u
.component_disable
.component_handle
= component
->handle
;
1055 ret
= send_synchronous_mmal_msg(instance
, &m
,
1056 sizeof(m
.u
.component_disable
),
1057 &rmsg
, &rmsg_handle
);
1061 if (rmsg
->h
.type
!= m
.h
.type
) {
1062 /* got an unexpected message type in reply */
1067 ret
= -rmsg
->u
.component_disable_reply
.status
;
1071 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1076 /* get version of mmal implementation */
1077 static int get_version(struct vchiq_mmal_instance
*instance
,
1078 u32
*major_out
, u32
*minor_out
)
1082 struct mmal_msg
*rmsg
;
1083 struct vchiq_header
*rmsg_handle
;
1085 m
.h
.type
= MMAL_MSG_TYPE_GET_VERSION
;
1087 ret
= send_synchronous_mmal_msg(instance
, &m
,
1088 sizeof(m
.u
.version
),
1089 &rmsg
, &rmsg_handle
);
1093 if (rmsg
->h
.type
!= m
.h
.type
) {
1094 /* got an unexpected message type in reply */
1099 *major_out
= rmsg
->u
.version
.major
;
1100 *minor_out
= rmsg
->u
.version
.minor
;
1103 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1108 /* do a port action with a port as a parameter */
1109 static int port_action_port(struct vchiq_mmal_instance
*instance
,
1110 struct vchiq_mmal_port
*port
,
1111 enum mmal_msg_port_action_type action_type
)
1115 struct mmal_msg
*rmsg
;
1116 struct vchiq_header
*rmsg_handle
;
1118 m
.h
.type
= MMAL_MSG_TYPE_PORT_ACTION
;
1119 m
.u
.port_action_port
.component_handle
= port
->component
->handle
;
1120 m
.u
.port_action_port
.port_handle
= port
->handle
;
1121 m
.u
.port_action_port
.action
= action_type
;
1123 port_to_mmal_msg(port
, &m
.u
.port_action_port
.port
);
1125 ret
= send_synchronous_mmal_msg(instance
, &m
,
1126 sizeof(m
.u
.port_action_port
),
1127 &rmsg
, &rmsg_handle
);
1131 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_ACTION
) {
1132 /* got an unexpected message type in reply */
1137 ret
= -rmsg
->u
.port_action_reply
.status
;
1139 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1141 ret
, port
->component
->handle
, port
->handle
,
1142 port_action_type_names
[action_type
], action_type
);
1145 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1150 /* do a port action with handles as parameters */
1151 static int port_action_handle(struct vchiq_mmal_instance
*instance
,
1152 struct vchiq_mmal_port
*port
,
1153 enum mmal_msg_port_action_type action_type
,
1154 u32 connect_component_handle
,
1155 u32 connect_port_handle
)
1159 struct mmal_msg
*rmsg
;
1160 struct vchiq_header
*rmsg_handle
;
1162 m
.h
.type
= MMAL_MSG_TYPE_PORT_ACTION
;
1164 m
.u
.port_action_handle
.component_handle
= port
->component
->handle
;
1165 m
.u
.port_action_handle
.port_handle
= port
->handle
;
1166 m
.u
.port_action_handle
.action
= action_type
;
1168 m
.u
.port_action_handle
.connect_component_handle
=
1169 connect_component_handle
;
1170 m
.u
.port_action_handle
.connect_port_handle
= connect_port_handle
;
1172 ret
= send_synchronous_mmal_msg(instance
, &m
,
1173 sizeof(m
.u
.port_action_handle
),
1174 &rmsg
, &rmsg_handle
);
1178 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_ACTION
) {
1179 /* got an unexpected message type in reply */
1184 ret
= -rmsg
->u
.port_action_reply
.status
;
1186 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1188 ret
, port
->component
->handle
, port
->handle
,
1189 port_action_type_names
[action_type
],
1190 action_type
, connect_component_handle
, connect_port_handle
);
1193 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1198 static int port_parameter_set(struct vchiq_mmal_instance
*instance
,
1199 struct vchiq_mmal_port
*port
,
1200 u32 parameter_id
, void *value
, u32 value_size
)
1204 struct mmal_msg
*rmsg
;
1205 struct vchiq_header
*rmsg_handle
;
1207 m
.h
.type
= MMAL_MSG_TYPE_PORT_PARAMETER_SET
;
1209 m
.u
.port_parameter_set
.component_handle
= port
->component
->handle
;
1210 m
.u
.port_parameter_set
.port_handle
= port
->handle
;
1211 m
.u
.port_parameter_set
.id
= parameter_id
;
1212 m
.u
.port_parameter_set
.size
= (2 * sizeof(u32
)) + value_size
;
1213 memcpy(&m
.u
.port_parameter_set
.value
, value
, value_size
);
1215 ret
= send_synchronous_mmal_msg(instance
, &m
,
1216 (4 * sizeof(u32
)) + value_size
,
1217 &rmsg
, &rmsg_handle
);
1221 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_PARAMETER_SET
) {
1222 /* got an unexpected message type in reply */
1227 ret
= -rmsg
->u
.port_parameter_set_reply
.status
;
1229 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1231 ret
, port
->component
->handle
, port
->handle
, parameter_id
);
1234 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1239 static int port_parameter_get(struct vchiq_mmal_instance
*instance
,
1240 struct vchiq_mmal_port
*port
,
1241 u32 parameter_id
, void *value
, u32
*value_size
)
1245 struct mmal_msg
*rmsg
;
1246 struct vchiq_header
*rmsg_handle
;
1248 m
.h
.type
= MMAL_MSG_TYPE_PORT_PARAMETER_GET
;
1250 m
.u
.port_parameter_get
.component_handle
= port
->component
->handle
;
1251 m
.u
.port_parameter_get
.port_handle
= port
->handle
;
1252 m
.u
.port_parameter_get
.id
= parameter_id
;
1253 m
.u
.port_parameter_get
.size
= (2 * sizeof(u32
)) + *value_size
;
1255 ret
= send_synchronous_mmal_msg(instance
, &m
,
1257 mmal_msg_port_parameter_get
),
1258 &rmsg
, &rmsg_handle
);
1262 if (rmsg
->h
.type
!= MMAL_MSG_TYPE_PORT_PARAMETER_GET
) {
1263 /* got an unexpected message type in reply */
1264 pr_err("Incorrect reply type %d\n", rmsg
->h
.type
);
1269 ret
= rmsg
->u
.port_parameter_get_reply
.status
;
1271 /* port_parameter_get_reply.size includes the header,
1272 * whilst *value_size doesn't.
1274 rmsg
->u
.port_parameter_get_reply
.size
-= (2 * sizeof(u32
));
1276 if (ret
|| rmsg
->u
.port_parameter_get_reply
.size
> *value_size
) {
1277 /* Copy only as much as we have space for
1278 * but report true size of parameter
1280 memcpy(value
, &rmsg
->u
.port_parameter_get_reply
.value
,
1283 memcpy(value
, &rmsg
->u
.port_parameter_get_reply
.value
,
1284 rmsg
->u
.port_parameter_get_reply
.size
);
1286 /* Always report the size of the returned parameter to the caller */
1287 *value_size
= rmsg
->u
.port_parameter_get_reply
.size
;
1289 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__
,
1290 ret
, port
->component
->handle
, port
->handle
, parameter_id
);
1293 vchiq_release_message(instance
->service_handle
, rmsg_handle
);
1298 /* disables a port and drains buffers from it */
1299 static int port_disable(struct vchiq_mmal_instance
*instance
,
1300 struct vchiq_mmal_port
*port
)
1303 struct list_head
*q
, *buf_head
;
1304 unsigned long flags
= 0;
1311 ret
= port_action_port(instance
, port
,
1312 MMAL_MSG_PORT_ACTION_TYPE_DISABLE
);
1315 * Drain all queued buffers on port. This should only
1316 * apply to buffers that have been queued before the port
1317 * has been enabled. If the port has been enabled and buffers
1318 * passed, then the buffers should have been removed from this
1319 * list, and we should get the relevant callbacks via VCHIQ
1320 * to release the buffers.
1322 spin_lock_irqsave(&port
->slock
, flags
);
1324 list_for_each_safe(buf_head
, q
, &port
->buffers
) {
1325 struct mmal_buffer
*mmalbuf
;
1327 mmalbuf
= list_entry(buf_head
, struct mmal_buffer
,
1330 if (port
->buffer_cb
) {
1331 mmalbuf
->length
= 0;
1332 mmalbuf
->mmal_flags
= 0;
1333 mmalbuf
->dts
= MMAL_TIME_UNKNOWN
;
1334 mmalbuf
->pts
= MMAL_TIME_UNKNOWN
;
1335 port
->buffer_cb(instance
,
1340 spin_unlock_irqrestore(&port
->slock
, flags
);
1342 ret
= port_info_get(instance
, port
);
1349 static int port_enable(struct vchiq_mmal_instance
*instance
,
1350 struct vchiq_mmal_port
*port
)
1352 unsigned int hdr_count
;
1353 struct list_head
*q
, *buf_head
;
1359 ret
= port_action_port(instance
, port
,
1360 MMAL_MSG_PORT_ACTION_TYPE_ENABLE
);
1366 if (port
->buffer_cb
) {
1367 /* send buffer headers to videocore */
1369 list_for_each_safe(buf_head
, q
, &port
->buffers
) {
1370 struct mmal_buffer
*mmalbuf
;
1372 mmalbuf
= list_entry(buf_head
, struct mmal_buffer
,
1374 ret
= buffer_from_host(instance
, port
, mmalbuf
);
1380 if (hdr_count
> port
->current_buffer
.num
)
1385 ret
= port_info_get(instance
, port
);
1391 /* ------------------------------------------------------------------
1393 *------------------------------------------------------------------
1396 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance
*instance
,
1397 struct vchiq_mmal_port
*port
)
1401 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1404 ret
= port_info_set(instance
, port
);
1406 goto release_unlock
;
1408 /* read what has actually been set */
1409 ret
= port_info_get(instance
, port
);
1412 mutex_unlock(&instance
->vchiq_mutex
);
1416 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format
);
1418 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance
*instance
,
1419 struct vchiq_mmal_port
*port
,
1420 u32 parameter
, void *value
, u32 value_size
)
1424 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1427 ret
= port_parameter_set(instance
, port
, parameter
, value
, value_size
);
1429 mutex_unlock(&instance
->vchiq_mutex
);
1433 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set
);
1435 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance
*instance
,
1436 struct vchiq_mmal_port
*port
,
1437 u32 parameter
, void *value
, u32
*value_size
)
1441 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1444 ret
= port_parameter_get(instance
, port
, parameter
, value
, value_size
);
1446 mutex_unlock(&instance
->vchiq_mutex
);
1450 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get
);
1454 * enables a port and queues buffers for satisfying callbacks if we
1455 * provide a callback handler
1457 int vchiq_mmal_port_enable(struct vchiq_mmal_instance
*instance
,
1458 struct vchiq_mmal_port
*port
,
1459 vchiq_mmal_buffer_cb buffer_cb
)
1463 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1466 /* already enabled - noop */
1467 if (port
->enabled
) {
1472 port
->buffer_cb
= buffer_cb
;
1474 ret
= port_enable(instance
, port
);
1477 mutex_unlock(&instance
->vchiq_mutex
);
1481 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable
);
1483 int vchiq_mmal_port_disable(struct vchiq_mmal_instance
*instance
,
1484 struct vchiq_mmal_port
*port
)
1488 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1491 if (!port
->enabled
) {
1492 mutex_unlock(&instance
->vchiq_mutex
);
1496 ret
= port_disable(instance
, port
);
1498 mutex_unlock(&instance
->vchiq_mutex
);
1502 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable
);
1504 /* ports will be connected in a tunneled manner so data buffers
1505 * are not handled by client.
1507 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance
*instance
,
1508 struct vchiq_mmal_port
*src
,
1509 struct vchiq_mmal_port
*dst
)
1513 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1516 /* disconnect ports if connected */
1517 if (src
->connected
) {
1518 ret
= port_disable(instance
, src
);
1520 pr_err("failed disabling src port(%d)\n", ret
);
1521 goto release_unlock
;
1524 /* do not need to disable the destination port as they
1525 * are connected and it is done automatically
1528 ret
= port_action_handle(instance
, src
,
1529 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT
,
1530 src
->connected
->component
->handle
,
1531 src
->connected
->handle
);
1533 pr_err("failed disconnecting src port\n");
1534 goto release_unlock
;
1536 src
->connected
->enabled
= 0;
1537 src
->connected
= NULL
;
1541 /* do not make new connection */
1543 pr_debug("not making new connection\n");
1544 goto release_unlock
;
1547 /* copy src port format to dst */
1548 dst
->format
.encoding
= src
->format
.encoding
;
1549 dst
->es
.video
.width
= src
->es
.video
.width
;
1550 dst
->es
.video
.height
= src
->es
.video
.height
;
1551 dst
->es
.video
.crop
.x
= src
->es
.video
.crop
.x
;
1552 dst
->es
.video
.crop
.y
= src
->es
.video
.crop
.y
;
1553 dst
->es
.video
.crop
.width
= src
->es
.video
.crop
.width
;
1554 dst
->es
.video
.crop
.height
= src
->es
.video
.crop
.height
;
1555 dst
->es
.video
.frame_rate
.num
= src
->es
.video
.frame_rate
.num
;
1556 dst
->es
.video
.frame_rate
.den
= src
->es
.video
.frame_rate
.den
;
1558 /* set new format */
1559 ret
= port_info_set(instance
, dst
);
1561 pr_debug("setting port info failed\n");
1562 goto release_unlock
;
1565 /* read what has actually been set */
1566 ret
= port_info_get(instance
, dst
);
1568 pr_debug("read back port info failed\n");
1569 goto release_unlock
;
1572 /* connect two ports together */
1573 ret
= port_action_handle(instance
, src
,
1574 MMAL_MSG_PORT_ACTION_TYPE_CONNECT
,
1575 dst
->component
->handle
, dst
->handle
);
1577 pr_debug("connecting port %d:%d to %d:%d failed\n",
1578 src
->component
->handle
, src
->handle
,
1579 dst
->component
->handle
, dst
->handle
);
1580 goto release_unlock
;
1582 src
->connected
= dst
;
1586 mutex_unlock(&instance
->vchiq_mutex
);
1590 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel
);
1592 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance
*instance
,
1593 struct vchiq_mmal_port
*port
,
1594 struct mmal_buffer
*buffer
)
1596 unsigned long flags
= 0;
1599 ret
= buffer_from_host(instance
, port
, buffer
);
1600 if (ret
== -EINVAL
) {
1601 /* Port is disabled. Queue for when it is enabled. */
1602 spin_lock_irqsave(&port
->slock
, flags
);
1603 list_add_tail(&buffer
->list
, &port
->buffers
);
1604 spin_unlock_irqrestore(&port
->slock
, flags
);
1609 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer
);
1611 int mmal_vchi_buffer_init(struct vchiq_mmal_instance
*instance
,
1612 struct mmal_buffer
*buf
)
1614 struct mmal_msg_context
*msg_context
= get_msg_context(instance
);
1616 if (IS_ERR(msg_context
))
1617 return (PTR_ERR(msg_context
));
1619 buf
->msg_context
= msg_context
;
1622 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init
);
1624 int mmal_vchi_buffer_cleanup(struct mmal_buffer
*buf
)
1626 struct mmal_msg_context
*msg_context
= buf
->msg_context
;
1629 release_msg_context(msg_context
);
1630 buf
->msg_context
= NULL
;
1634 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup
);
1636 /* Initialise a mmal component and its ports
1639 int vchiq_mmal_component_init(struct vchiq_mmal_instance
*instance
,
1641 struct vchiq_mmal_component
**component_out
)
1644 int idx
; /* port index */
1645 struct vchiq_mmal_component
*component
= NULL
;
1647 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1650 for (idx
= 0; idx
< VCHIQ_MMAL_MAX_COMPONENTS
; idx
++) {
1651 if (!instance
->component
[idx
].in_use
) {
1652 component
= &instance
->component
[idx
];
1653 component
->in_use
= 1;
1659 ret
= -EINVAL
; /* todo is this correct error? */
1663 /* We need a handle to reference back to our component structure.
1664 * Use the array index in instance->component rather than rolling
1667 component
->client_component
= idx
;
1669 ret
= create_component(instance
, component
, name
);
1671 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1676 /* ports info needs gathering */
1677 component
->control
.type
= MMAL_PORT_TYPE_CONTROL
;
1678 component
->control
.index
= 0;
1679 component
->control
.component
= component
;
1680 spin_lock_init(&component
->control
.slock
);
1681 INIT_LIST_HEAD(&component
->control
.buffers
);
1682 ret
= port_info_get(instance
, &component
->control
);
1684 goto release_component
;
1686 for (idx
= 0; idx
< component
->inputs
; idx
++) {
1687 component
->input
[idx
].type
= MMAL_PORT_TYPE_INPUT
;
1688 component
->input
[idx
].index
= idx
;
1689 component
->input
[idx
].component
= component
;
1690 spin_lock_init(&component
->input
[idx
].slock
);
1691 INIT_LIST_HEAD(&component
->input
[idx
].buffers
);
1692 ret
= port_info_get(instance
, &component
->input
[idx
]);
1694 goto release_component
;
1697 for (idx
= 0; idx
< component
->outputs
; idx
++) {
1698 component
->output
[idx
].type
= MMAL_PORT_TYPE_OUTPUT
;
1699 component
->output
[idx
].index
= idx
;
1700 component
->output
[idx
].component
= component
;
1701 spin_lock_init(&component
->output
[idx
].slock
);
1702 INIT_LIST_HEAD(&component
->output
[idx
].buffers
);
1703 ret
= port_info_get(instance
, &component
->output
[idx
]);
1705 goto release_component
;
1708 for (idx
= 0; idx
< component
->clocks
; idx
++) {
1709 component
->clock
[idx
].type
= MMAL_PORT_TYPE_CLOCK
;
1710 component
->clock
[idx
].index
= idx
;
1711 component
->clock
[idx
].component
= component
;
1712 spin_lock_init(&component
->clock
[idx
].slock
);
1713 INIT_LIST_HEAD(&component
->clock
[idx
].buffers
);
1714 ret
= port_info_get(instance
, &component
->clock
[idx
]);
1716 goto release_component
;
1719 *component_out
= component
;
1721 mutex_unlock(&instance
->vchiq_mutex
);
1726 destroy_component(instance
, component
);
1729 component
->in_use
= 0;
1730 mutex_unlock(&instance
->vchiq_mutex
);
1734 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init
);
1737 * cause a mmal component to be destroyed
1739 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance
*instance
,
1740 struct vchiq_mmal_component
*component
)
1744 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1747 if (component
->enabled
)
1748 ret
= disable_component(instance
, component
);
1750 ret
= destroy_component(instance
, component
);
1752 component
->in_use
= 0;
1754 mutex_unlock(&instance
->vchiq_mutex
);
1758 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise
);
1761 * cause a mmal component to be enabled
1763 int vchiq_mmal_component_enable(struct vchiq_mmal_instance
*instance
,
1764 struct vchiq_mmal_component
*component
)
1768 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1771 if (component
->enabled
) {
1772 mutex_unlock(&instance
->vchiq_mutex
);
1776 ret
= enable_component(instance
, component
);
1778 component
->enabled
= true;
1780 mutex_unlock(&instance
->vchiq_mutex
);
1784 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable
);
1787 * cause a mmal component to be enabled
1789 int vchiq_mmal_component_disable(struct vchiq_mmal_instance
*instance
,
1790 struct vchiq_mmal_component
*component
)
1794 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1797 if (!component
->enabled
) {
1798 mutex_unlock(&instance
->vchiq_mutex
);
1802 ret
= disable_component(instance
, component
);
1804 component
->enabled
= 0;
1806 mutex_unlock(&instance
->vchiq_mutex
);
1810 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable
);
1812 int vchiq_mmal_version(struct vchiq_mmal_instance
*instance
,
1813 u32
*major_out
, u32
*minor_out
)
1817 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1820 ret
= get_version(instance
, major_out
, minor_out
);
1822 mutex_unlock(&instance
->vchiq_mutex
);
1826 EXPORT_SYMBOL_GPL(vchiq_mmal_version
);
1828 int vchiq_mmal_finalise(struct vchiq_mmal_instance
*instance
)
1835 if (mutex_lock_interruptible(&instance
->vchiq_mutex
))
1838 vchiq_use_service(instance
->service_handle
);
1840 status
= vchiq_close_service(instance
->service_handle
);
1842 pr_err("mmal-vchiq: VCHIQ close failed\n");
1844 mutex_unlock(&instance
->vchiq_mutex
);
1846 vchiq_shutdown(instance
->vchiq_instance
);
1847 flush_workqueue(instance
->bulk_wq
);
1848 destroy_workqueue(instance
->bulk_wq
);
1850 vfree(instance
->bulk_scratch
);
1852 idr_destroy(&instance
->context_map
);
1858 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise
);
1860 int vchiq_mmal_init(struct vchiq_mmal_instance
**out_instance
)
1864 struct vchiq_mmal_instance
*instance
;
1865 static struct vchiq_instance
*vchiq_instance
;
1866 struct vchiq_service_params_kernel params
= {
1867 .version
= VC_MMAL_VER
,
1868 .version_min
= VC_MMAL_MIN_VER
,
1869 .fourcc
= VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1870 .callback
= service_callback
,
1874 /* compile time checks to ensure structure size as they are
1875 * directly (de)serialised from memory.
1878 /* ensure the header structure has packed to the correct size */
1879 BUILD_BUG_ON(sizeof(struct mmal_msg_header
) != 24);
1881 /* ensure message structure does not exceed maximum length */
1882 BUILD_BUG_ON(sizeof(struct mmal_msg
) > MMAL_MSG_MAX_SIZE
);
1884 /* mmal port struct is correct size */
1885 BUILD_BUG_ON(sizeof(struct mmal_port
) != 64);
1887 /* create a vchi instance */
1888 status
= vchiq_initialise(&vchiq_instance
);
1890 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1895 status
= vchiq_connect(vchiq_instance
);
1897 pr_err("Failed to connect VCHI instance (status=%d)\n", status
);
1899 goto err_shutdown_vchiq
;
1902 instance
= kzalloc(sizeof(*instance
), GFP_KERNEL
);
1906 goto err_shutdown_vchiq
;
1909 mutex_init(&instance
->vchiq_mutex
);
1911 instance
->bulk_scratch
= vmalloc(PAGE_SIZE
);
1912 instance
->vchiq_instance
= vchiq_instance
;
1914 mutex_init(&instance
->context_map_lock
);
1915 idr_init_base(&instance
->context_map
, 1);
1917 params
.userdata
= instance
;
1919 instance
->bulk_wq
= alloc_ordered_workqueue("mmal-vchiq",
1921 if (!instance
->bulk_wq
)
1924 status
= vchiq_open_service(vchiq_instance
, ¶ms
,
1925 &instance
->service_handle
);
1927 pr_err("Failed to open VCHI service connection (status=%d)\n",
1929 goto err_close_services
;
1932 vchiq_release_service(instance
->service_handle
);
1934 *out_instance
= instance
;
1939 vchiq_close_service(instance
->service_handle
);
1940 destroy_workqueue(instance
->bulk_wq
);
1942 vfree(instance
->bulk_scratch
);
1945 vchiq_shutdown(vchiq_instance
);
1948 EXPORT_SYMBOL_GPL(vchiq_mmal_init
);
1950 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1951 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1952 MODULE_LICENSE("GPL");