]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/media/platform/bcm2835/mmal-vchiq.c
bcm2835-camera: Correct port_parameter_get return value
[mirror_ubuntu-zesty-kernel.git] / drivers / media / platform / bcm2835 / mmal-vchiq.c
1 /*
2 * Broadcom BM2835 V4L2 driver
3 *
4 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 *
10 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
11 * Dave Stevenson <dsteve@broadcom.com>
12 * Simon Mellor <simellor@broadcom.com>
13 * Luke Diamand <luked@broadcom.com>
14 *
15 * V4L2 driver MMAL vchiq interface code
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/mutex.h>
23 #include <linux/mm.h>
24 #include <linux/slab.h>
25 #include <linux/completion.h>
26 #include <linux/vmalloc.h>
27 #include <asm/cacheflush.h>
28 #include <media/videobuf2-vmalloc.h>
29
30 #include "mmal-common.h"
31 #include "mmal-vchiq.h"
32 #include "mmal-msg.h"
33
34 #define USE_VCHIQ_ARM
35 #include "interface/vchi/vchi.h"
36
37 /* maximum number of components supported */
38 #define VCHIQ_MMAL_MAX_COMPONENTS 4
39
40 /*#define FULL_MSG_DUMP 1*/
41
#ifdef DEBUG
/* Human-readable names for MMAL message types, indexed by the numeric
 * message type value ((MSG)->h.type), so entries must remain in enum
 * order.  Only referenced by the DBG_DUMP_MSG macro below.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
73
/* Human-readable names for MMAL port actions, indexed by the numeric
 * port action type.  NOTE(review): defined unconditionally but appears
 * unused in this chunk - presumably referenced by debug code elsewhere
 * in the file; confirm before removing.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
83
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/* Dump a message header plus a hex dump of its payload. */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/* Header-only dump.  Wrapped in do { } while (0) (the previous bare
 * brace block would miscompile in an un-braced if/else arm).
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
110
/* normal message context
 *
 * One context exists per in-flight request.  The union reflects the two
 * mutually exclusive uses: a bulk buffer transfer (whose completion is
 * deferred to a workqueue) or a synchronous request waiting on a
 * completion in send_synchronous_mmal_msg().
 */
struct mmal_msg_context {
	union {
		struct {
			/* work struct for defered callback - must come first
			 * so the work_struct pointer can be mapped back to
			 * the containing context
			 */
			struct work_struct work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;			/* bulk data */

		struct {
			/* message handle to release */
			VCHI_HELD_MSG_T msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;			/* synchronous response */
	} u;

};
148
/* Per-connection state for one VCHI service to the VideoCore MMAL
 * service.
 */
struct vchiq_mmal_instance {
	/* VCHI service handle all messages are sent/received on */
	VCHI_SERVICE_HANDLE_T handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* ensure serialised access to bulk operations */
	struct mutex bulk_mutex;

	/* vmalloc page to receive scratch bulk xfers into */
	void *bulk_scratch;

	/* component to use next */
	int component_idx;
	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
};
165
166 static struct mmal_msg_context *get_msg_context(struct vchiq_mmal_instance
167 *instance)
168 {
169 struct mmal_msg_context *msg_context;
170
171 /* todo: should this be allocated from a pool to avoid kmalloc */
172 msg_context = kmalloc(sizeof(*msg_context), GFP_KERNEL);
173 memset(msg_context, 0, sizeof(*msg_context));
174
175 return msg_context;
176 }
177
/* Free a message context obtained from get_msg_context(). */
static void release_msg_context(struct mmal_msg_context *msg_context)
{
	kfree(msg_context);
}
182
/* deals with receipt of event to host message
 *
 * Events are currently only logged, not delivered anywhere - hence the
 * "unhandled" message below.
 */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%p port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
194
195 /* workqueue scheduled callback
196 *
197 * we do this because it is important we do not call any other vchiq
198 * sync calls from witin the message delivery thread
199 */
200 static void buffer_work_cb(struct work_struct *work)
201 {
202 struct mmal_msg_context *msg_context = (struct mmal_msg_context *)work;
203
204 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
205 msg_context->u.bulk.port,
206 msg_context->u.bulk.status,
207 msg_context->u.bulk.buffer,
208 msg_context->u.bulk.buffer_used,
209 msg_context->u.bulk.mmal_flags,
210 msg_context->u.bulk.dts,
211 msg_context->u.bulk.pts);
212
213 /* release message context */
214 release_msg_context(msg_context);
215 }
216
/* enqueue a bulk receive for a given message context
 *
 * Takes the next buffer off the port's queue and queues a VCHI bulk
 * receive of the payload announced in @msg into it.  On success the
 * bulk_mutex is deliberately left held; it is released by
 * bulk_receive_cb()/bulk_abort_cb() when the transfer completes.  On
 * any error path the mutex is released here.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;
	unsigned long flags = 0;
	int ret;

	/* bulk mutex stops other bulk operations while we have a
	 * receive in progress - released in callback
	 */
	ret = mutex_lock_interruptible(&instance->bulk_mutex);
	if (ret != 0)
		return ret;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	/* take buffer from queue */
	spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
	if (list_empty(&msg_context->u.bulk.port->buffers)) {
		spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
		pr_err("buffer list empty trying to submit bulk receive\n");

		/* todo: this is a serious error, we should never have
		 * commited a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal servie going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		mutex_unlock(&instance->bulk_mutex);

		return -EINVAL;
	}

	msg_context->u.bulk.buffer =
	    list_entry(msg_context->u.bulk.port->buffers.next,
		       struct mmal_buffer, list);
	list_del(&msg_context->u.bulk.buffer->list);

	spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length and metadata for the deferred buffer callback */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.mmal_flags =
	    msg->u.buffer_from_host.buffer_header.flags;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	// only need to flush L1 cache here, as VCHIQ takes care of the L2
	// cache.
	__cpuc_flush_dcache_area(msg_context->u.bulk.buffer->buffer, rd_len);

	/* queue the bulk submission; msg_context is handed to VCHI and
	 * comes back via service_callback() as bulk_ctx
	 */
	vchi_service_use(instance->handle);
	ret = vchi_bulk_queue_receive(instance->handle,
				      msg_context->u.bulk.buffer->buffer,
				      /* Actual receive needs to be a multiple
				       * of 4 bytes
				       */
				      (rd_len + 3) & ~3,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	if (ret != 0) {
		/* callback will not be clearing the mutex */
		mutex_unlock(&instance->bulk_mutex);
	}

	return ret;
}
304
/* enqueue a dummy bulk receive for a given message context
 *
 * Used for zero-length (EOS) buffers: the VPU still performs a small
 * bulk transfer, which we receive into the shared scratch page so the
 * completion callback machinery runs as normal.  As with bulk_receive()
 * the bulk_mutex stays held on success and is released by the
 * completion callback.
 */
static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
			      struct mmal_msg_context *msg_context)
{
	int ret;

	/* bulk mutex stops other bulk operations while we have a
	 * receive in progress - released in callback
	 */
	ret = mutex_lock_interruptible(&instance->bulk_mutex);
	if (ret != 0)
		return ret;

	/* zero length indicates this was a dummy transfer */
	msg_context->u.bulk.buffer_used = 0;

	/* queue the bulk submission */
	vchi_service_use(instance->handle);

	ret = vchi_bulk_queue_receive(instance->handle,
				      instance->bulk_scratch,
				      8,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	if (ret != 0) {
		/* callback will not be clearing the mutex */
		mutex_unlock(&instance->bulk_mutex);
	}

	return ret;
}
340
/* data in message, memcpy from packet into output buffer
 *
 * Small payloads arrive inline in the message (short_data); copy them
 * straight into the next queued port buffer.  Unlike the bulk paths
 * this does not touch bulk_mutex - no bulk transfer is queued.
 */
static int inline_receive(struct vchiq_mmal_instance *instance,
			  struct mmal_msg *msg,
			  struct mmal_msg_context *msg_context)
{
	unsigned long flags = 0;

	/* take buffer from queue */
	spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
	if (list_empty(&msg_context->u.bulk.port->buffers)) {
		spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
		pr_err("buffer list empty trying to receive inline\n");

		/* todo: this is a serious error, we should never have
		 * commited a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (with
		 * underflow handling) and there is no obvious way to
		 * deal with this. Less bad than the bulk case as we
		 * can just drop this on the floor but...unhelpful
		 */
		return -EINVAL;
	}

	msg_context->u.bulk.buffer =
	    list_entry(msg_context->u.bulk.port->buffers.next,
		       struct mmal_buffer, list);
	list_del(&msg_context->u.bulk.buffer->list);

	spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);

	/* NOTE(review): no bounds check of payload_in_message against
	 * buffer_size here; the caller only guarantees it is at most
	 * MMAL_VC_SHORT_DATA - confirm the queued buffers are always at
	 * least that large.
	 */
	memcpy(msg_context->u.bulk.buffer->buffer,
	       msg->u.buffer_from_host.short_data,
	       msg->u.buffer_from_host.payload_in_message);

	msg_context->u.bulk.buffer_used =
	    msg->u.buffer_from_host.payload_in_message;

	return 0;
}
380
381 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
382 static int
383 buffer_from_host(struct vchiq_mmal_instance *instance,
384 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
385 {
386 struct mmal_msg_context *msg_context;
387 struct mmal_msg m;
388 int ret;
389
390 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
391
392 /* bulk mutex stops other bulk operations while we
393 * have a receive in progress
394 */
395 if (mutex_lock_interruptible(&instance->bulk_mutex))
396 return -EINTR;
397
398 /* get context */
399 msg_context = get_msg_context(instance);
400 if (msg_context == NULL)
401 return -ENOMEM;
402
403 /* store bulk message context for when data arrives */
404 msg_context->u.bulk.instance = instance;
405 msg_context->u.bulk.port = port;
406 msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
407 msg_context->u.bulk.buffer_used = 0;
408
409 /* initialise work structure ready to schedule callback */
410 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
411
412 /* prep the buffer from host message */
413 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
414
415 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
416 m.h.magic = MMAL_MAGIC;
417 m.h.context = msg_context;
418 m.h.status = 0;
419
420 /* drvbuf is our private data passed back */
421 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
422 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
423 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
424 m.u.buffer_from_host.drvbuf.client_context = msg_context;
425
426 /* buffer header */
427 m.u.buffer_from_host.buffer_header.cmd = 0;
428 m.u.buffer_from_host.buffer_header.data = buf->buffer;
429 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
430 m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
431 m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
432 m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
433 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
434 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
435
436 /* clear buffer type sepecific data */
437 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
438 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
439
440 /* no payload in message */
441 m.u.buffer_from_host.payload_in_message = 0;
442
443 vchi_service_use(instance->handle);
444
445 ret = vchi_msg_queue(instance->handle, &m,
446 sizeof(struct mmal_msg_header) +
447 sizeof(m.u.buffer_from_host),
448 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
449
450 if (ret != 0) {
451 release_msg_context(msg_context);
452 /* todo: is this correct error value? */
453 }
454
455 vchi_service_release(instance->handle);
456
457 mutex_unlock(&instance->bulk_mutex);
458
459 return ret;
460 }
461
462 /* submit a buffer to the mmal sevice
463 *
464 * the buffer_from_host uses size data from the ports next available
465 * mmal_buffer and deals with there being no buffer available by
466 * incrementing the underflow for later
467 */
468 static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
469 struct vchiq_mmal_port *port)
470 {
471 int ret;
472 struct mmal_buffer *buf;
473 unsigned long flags = 0;
474
475 if (!port->enabled)
476 return -EINVAL;
477
478 /* peek buffer from queue */
479 spin_lock_irqsave(&port->slock, flags);
480 if (list_empty(&port->buffers)) {
481 port->buffer_underflow++;
482 spin_unlock_irqrestore(&port->slock, flags);
483 return -ENOSPC;
484 }
485
486 buf = list_entry(port->buffers.next, struct mmal_buffer, list);
487
488 spin_unlock_irqrestore(&port->slock, flags);
489
490 /* issue buffer to mmal service */
491 ret = buffer_from_host(instance, port, buf);
492 if (ret) {
493 pr_err("adding buffer header failed\n");
494 /* todo: how should this be dealt with */
495 }
496
497 return ret;
498 }
499
/* deals with receipt of buffer to host message
 *
 * Recovers the bulk message context from drvbuf.client_context and
 * dispatches on the reply: error status, empty buffer (EOS or not),
 * bulk payload, or inline payload.  Paths that queue a bulk transfer
 * return early - the bulk completion will schedule the port callback;
 * all other paths fall through to resubmit a buffer header and
 * schedule the callback work here.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;

	pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
		 instance, msg, msg_len);

	/* the magic guards against replies that do not carry our
	 * private drvbuf data
	 */
	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		msg_context = msg->u.buffer_from_host.drvbuf.client_context;
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			msg_context->u.bulk.status =
			    dummy_bulk_receive(instance, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* replace the buffer header */
	port_buffer_from_host(instance, msg_context->u.bulk.port);

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
570
/* Bulk receive completed successfully: release the bulk mutex taken in
 * bulk_receive()/dummy_bulk_receive(), replace the consumed buffer
 * header and hand the filled buffer to the port via deferred work.
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	/* bulk receive operation complete */
	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);

	/* replace the buffer header */
	port_buffer_from_host(msg_context->u.bulk.instance,
			      msg_context->u.bulk.port);

	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
586
/* Bulk receive was aborted: same cleanup as bulk_receive_cb() but the
 * deferred port callback is given -EINTR as the buffer status.
 */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	/* bulk receive operation complete */
	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);

	/* replace the buffer header */
	port_buffer_from_host(msg_context->u.bulk.instance,
			      msg_context->u.bulk.port);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
603
/* incoming event service callback
 *
 * Runs in VCHI's message delivery context: dequeues and dispatches
 * incoming messages (buffer traffic and events handled directly,
 * everything else completes the synchronous waiter stored in the
 * message header context), and routes bulk completion/abort callbacks.
 * Must not make blocking vchiq sync calls from here - hence the
 * workqueue indirection in the buffer path.
 */
static void service_callback(void *param,
			     const VCHI_CALLBACK_REASON_T reason,
			     void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = param;
	int status;
	u32 msg_len;
	struct mmal_msg *msg;
	VCHI_HELD_MSG_T msg_handle;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return;
	}

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		status = vchi_msg_hold(instance->handle, (void **)&msg,
				       &msg_len, VCHI_FLAGS_NONE, &msg_handle);
		if (status) {
			pr_err("Unable to dequeue a message (%d)\n", status);
			break;
		}

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {

		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* firmware's ack of our own submission - nothing
			 * to do beyond releasing the held message
			 */
			vchi_held_msg_release(&msg_handle);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);
			break;

		default:
			/* messages dependant on header context to complete */

			/* todo: the msg.context really ought to be sanity
			 * checked before we just use it, afaict it comes back
			 * and is used raw from the videocore. Perhaps it
			 * should be verified the address lies in the kernel
			 * address space.
			 */
			if (msg->h.context == NULL) {
				pr_err("received message context was null!\n");
				vchi_held_msg_release(&msg_handle);
				break;
			}

			/* fill in context values; the held message is NOT
			 * released here - ownership passes to the waiter,
			 * which releases it after reading the reply
			 */
			msg->h.context->u.sync.msg_handle = msg_handle;
			msg->h.context->u.sync.msg = msg;
			msg->h.context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg->h.context->u.sync.cmplt);
			break;
		}

		break;

	case VCHI_CALLBACK_BULK_RECEIVED:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}
}
704
705 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
706 struct mmal_msg *msg,
707 unsigned int payload_len,
708 struct mmal_msg **msg_out,
709 VCHI_HELD_MSG_T *msg_handle_out)
710 {
711 struct mmal_msg_context msg_context;
712 int ret;
713
714 /* payload size must not cause message to exceed max size */
715 if (payload_len >
716 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
717 pr_err("payload length %d exceeds max:%d\n", payload_len,
718 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header)));
719 return -EINVAL;
720 }
721
722 init_completion(&msg_context.u.sync.cmplt);
723
724 msg->h.magic = MMAL_MAGIC;
725 msg->h.context = &msg_context;
726 msg->h.status = 0;
727
728 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
729 ">>> sync message");
730
731 vchi_service_use(instance->handle);
732
733 ret = vchi_msg_queue(instance->handle,
734 msg,
735 sizeof(struct mmal_msg_header) + payload_len,
736 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
737
738 vchi_service_release(instance->handle);
739
740 if (ret) {
741 pr_err("error %d queuing message\n", ret);
742 return ret;
743 }
744
745 ret = wait_for_completion_timeout(&msg_context.u.sync.cmplt, 3*HZ);
746 if (ret <= 0) {
747 pr_err("error %d waiting for sync completion\n", ret);
748 if (ret == 0)
749 ret = -ETIME;
750 /* todo: what happens if the message arrives after aborting */
751 return ret;
752 }
753
754 *msg_out = msg_context.u.sync.msg;
755 *msg_handle_out = msg_context.u.sync.msg_handle;
756
757 return 0;
758 }
759
/* Log the interesting fields of a port (buffer requirements, stream
 * format and, for video ports, the es video geometry) at debug level.
 */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementry stream: type:%d encoding:0x%x varient:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug(" bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug(" : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug(" : framerate %d/%d aspect %d/%d\n",
			 port->es.video.frame_rate.num,
			 port->es.video.frame_rate.den,
			 port->es.video.par.num, port->es.video.par.den);
	}
}
800
801 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
802 {
803
804 /* todo do readonly fields need setting at all? */
805 p->type = port->type;
806 p->index = port->index;
807 p->index_all = 0;
808 p->is_enabled = port->enabled;
809 p->buffer_num_min = port->minimum_buffer.num;
810 p->buffer_size_min = port->minimum_buffer.size;
811 p->buffer_alignment_min = port->minimum_buffer.alignment;
812 p->buffer_num_recommended = port->recommended_buffer.num;
813 p->buffer_size_recommended = port->recommended_buffer.size;
814
815 /* only three writable fields in a port */
816 p->buffer_num = port->current_buffer.num;
817 p->buffer_size = port->current_buffer.size;
818 p->userdata = port;
819 }
820
821 static int port_info_set(struct vchiq_mmal_instance *instance,
822 struct vchiq_mmal_port *port)
823 {
824 int ret;
825 struct mmal_msg m;
826 struct mmal_msg *rmsg;
827 VCHI_HELD_MSG_T rmsg_handle;
828
829 pr_debug("setting port info port %p\n", port);
830 if (!port)
831 return -1;
832 dump_port_info(port);
833
834 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
835
836 m.u.port_info_set.component_handle = port->component->handle;
837 m.u.port_info_set.port_type = port->type;
838 m.u.port_info_set.port_index = port->index;
839
840 port_to_mmal_msg(port, &m.u.port_info_set.port);
841
842 /* elementry stream format setup */
843 m.u.port_info_set.format.type = port->format.type;
844 m.u.port_info_set.format.encoding = port->format.encoding;
845 m.u.port_info_set.format.encoding_variant =
846 port->format.encoding_variant;
847 m.u.port_info_set.format.bitrate = port->format.bitrate;
848 m.u.port_info_set.format.flags = port->format.flags;
849
850 memcpy(&m.u.port_info_set.es, &port->es,
851 sizeof(union mmal_es_specific_format));
852
853 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
854 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
855 port->format.extradata_size);
856
857 ret = send_synchronous_mmal_msg(instance, &m,
858 sizeof(m.u.port_info_set),
859 &rmsg, &rmsg_handle);
860 if (ret)
861 return ret;
862
863 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
864 /* got an unexpected message type in reply */
865 ret = -EINVAL;
866 goto release_msg;
867 }
868
869 /* return operation status */
870 ret = -rmsg->u.port_info_get_reply.status;
871
872 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
873 port->component->handle, port->handle);
874
875 release_msg:
876 vchi_held_msg_release(&rmsg_handle);
877
878 return ret;
879
880 }
881
/* use port info get message to retrive port information
 *
 * Sends PORT_INFO_GET and copies the reply (handles, buffer
 * requirements, stream format, es-specific format and extradata) into
 * our cached port state.  Returns 0 on success, a negative errno on
 * transport failure, or the negated firmware status.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status.
	 * NOTE(review): compares the negated status against
	 * MMAL_MSG_STATUS_SUCCESS - this only works if SUCCESS is 0;
	 * confirm in mmal-msg.h.
	 */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set becuase
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended_buffer.size is never populated from
	 * the reply (only num and alignment) - looks like an omission;
	 * verify against callers before relying on it.
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementry stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
976
977 /* create comonent on vc */
978 static int create_component(struct vchiq_mmal_instance *instance,
979 struct vchiq_mmal_component *component,
980 const char *name)
981 {
982 int ret;
983 struct mmal_msg m;
984 struct mmal_msg *rmsg;
985 VCHI_HELD_MSG_T rmsg_handle;
986
987 /* build component create message */
988 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
989 m.u.component_create.client_component = component;
990 strncpy(m.u.component_create.name, name,
991 sizeof(m.u.component_create.name));
992
993 ret = send_synchronous_mmal_msg(instance, &m,
994 sizeof(m.u.component_create),
995 &rmsg, &rmsg_handle);
996 if (ret)
997 return ret;
998
999 if (rmsg->h.type != m.h.type) {
1000 /* got an unexpected message type in reply */
1001 ret = -EINVAL;
1002 goto release_msg;
1003 }
1004
1005 ret = -rmsg->u.component_create_reply.status;
1006 if (ret != MMAL_MSG_STATUS_SUCCESS)
1007 goto release_msg;
1008
1009 /* a valid component response received */
1010 component->handle = rmsg->u.component_create_reply.component_handle;
1011 component->inputs = rmsg->u.component_create_reply.input_num;
1012 component->outputs = rmsg->u.component_create_reply.output_num;
1013 component->clocks = rmsg->u.component_create_reply.clock_num;
1014
1015 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1016 component->handle,
1017 component->inputs, component->outputs, component->clocks);
1018
1019 release_msg:
1020 vchi_held_msg_release(&rmsg_handle);
1021
1022 return ret;
1023 }
1024
1025 /* destroys a component on vc */
1026 static int destroy_component(struct vchiq_mmal_instance *instance,
1027 struct vchiq_mmal_component *component)
1028 {
1029 int ret;
1030 struct mmal_msg m;
1031 struct mmal_msg *rmsg;
1032 VCHI_HELD_MSG_T rmsg_handle;
1033
1034 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1035 m.u.component_destroy.component_handle = component->handle;
1036
1037 ret = send_synchronous_mmal_msg(instance, &m,
1038 sizeof(m.u.component_destroy),
1039 &rmsg, &rmsg_handle);
1040 if (ret)
1041 return ret;
1042
1043 if (rmsg->h.type != m.h.type) {
1044 /* got an unexpected message type in reply */
1045 ret = -EINVAL;
1046 goto release_msg;
1047 }
1048
1049 ret = -rmsg->u.component_destroy_reply.status;
1050
1051 release_msg:
1052
1053 vchi_held_msg_release(&rmsg_handle);
1054
1055 return ret;
1056 }
1057
1058 /* enable a component on vc */
1059 static int enable_component(struct vchiq_mmal_instance *instance,
1060 struct vchiq_mmal_component *component)
1061 {
1062 int ret;
1063 struct mmal_msg m;
1064 struct mmal_msg *rmsg;
1065 VCHI_HELD_MSG_T rmsg_handle;
1066
1067 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1068 m.u.component_enable.component_handle = component->handle;
1069
1070 ret = send_synchronous_mmal_msg(instance, &m,
1071 sizeof(m.u.component_enable),
1072 &rmsg, &rmsg_handle);
1073 if (ret)
1074 return ret;
1075
1076 if (rmsg->h.type != m.h.type) {
1077 /* got an unexpected message type in reply */
1078 ret = -EINVAL;
1079 goto release_msg;
1080 }
1081
1082 ret = -rmsg->u.component_enable_reply.status;
1083
1084 release_msg:
1085 vchi_held_msg_release(&rmsg_handle);
1086
1087 return ret;
1088 }
1089
1090 /* disable a component on vc */
1091 static int disable_component(struct vchiq_mmal_instance *instance,
1092 struct vchiq_mmal_component *component)
1093 {
1094 int ret;
1095 struct mmal_msg m;
1096 struct mmal_msg *rmsg;
1097 VCHI_HELD_MSG_T rmsg_handle;
1098
1099 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1100 m.u.component_disable.component_handle = component->handle;
1101
1102 ret = send_synchronous_mmal_msg(instance, &m,
1103 sizeof(m.u.component_disable),
1104 &rmsg, &rmsg_handle);
1105 if (ret)
1106 return ret;
1107
1108 if (rmsg->h.type != m.h.type) {
1109 /* got an unexpected message type in reply */
1110 ret = -EINVAL;
1111 goto release_msg;
1112 }
1113
1114 ret = -rmsg->u.component_disable_reply.status;
1115
1116 release_msg:
1117
1118 vchi_held_msg_release(&rmsg_handle);
1119
1120 return ret;
1121 }
1122
1123 /* get version of mmal implementation */
1124 static int get_version(struct vchiq_mmal_instance *instance,
1125 u32 *major_out, u32 *minor_out)
1126 {
1127 int ret;
1128 struct mmal_msg m;
1129 struct mmal_msg *rmsg;
1130 VCHI_HELD_MSG_T rmsg_handle;
1131
1132 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1133
1134 ret = send_synchronous_mmal_msg(instance, &m,
1135 sizeof(m.u.version),
1136 &rmsg, &rmsg_handle);
1137 if (ret)
1138 return ret;
1139
1140 if (rmsg->h.type != m.h.type) {
1141 /* got an unexpected message type in reply */
1142 ret = -EINVAL;
1143 goto release_msg;
1144 }
1145
1146 *major_out = rmsg->u.version.major;
1147 *minor_out = rmsg->u.version.minor;
1148
1149 release_msg:
1150 vchi_held_msg_release(&rmsg_handle);
1151
1152 return ret;
1153 }
1154
1155 /* do a port action with a port as a parameter */
1156 static int port_action_port(struct vchiq_mmal_instance *instance,
1157 struct vchiq_mmal_port *port,
1158 enum mmal_msg_port_action_type action_type)
1159 {
1160 int ret;
1161 struct mmal_msg m;
1162 struct mmal_msg *rmsg;
1163 VCHI_HELD_MSG_T rmsg_handle;
1164
1165 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1166 m.u.port_action_port.component_handle = port->component->handle;
1167 m.u.port_action_port.port_handle = port->handle;
1168 m.u.port_action_port.action = action_type;
1169
1170 port_to_mmal_msg(port, &m.u.port_action_port.port);
1171
1172 ret = send_synchronous_mmal_msg(instance, &m,
1173 sizeof(m.u.port_action_port),
1174 &rmsg, &rmsg_handle);
1175 if (ret)
1176 return ret;
1177
1178 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1179 /* got an unexpected message type in reply */
1180 ret = -EINVAL;
1181 goto release_msg;
1182 }
1183
1184 ret = -rmsg->u.port_action_reply.status;
1185
1186 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1187 __func__,
1188 ret, port->component->handle, port->handle,
1189 port_action_type_names[action_type], action_type);
1190
1191 release_msg:
1192 vchi_held_msg_release(&rmsg_handle);
1193
1194 return ret;
1195 }
1196
1197 /* do a port action with handles as parameters */
1198 static int port_action_handle(struct vchiq_mmal_instance *instance,
1199 struct vchiq_mmal_port *port,
1200 enum mmal_msg_port_action_type action_type,
1201 u32 connect_component_handle,
1202 u32 connect_port_handle)
1203 {
1204 int ret;
1205 struct mmal_msg m;
1206 struct mmal_msg *rmsg;
1207 VCHI_HELD_MSG_T rmsg_handle;
1208
1209 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1210
1211 m.u.port_action_handle.component_handle = port->component->handle;
1212 m.u.port_action_handle.port_handle = port->handle;
1213 m.u.port_action_handle.action = action_type;
1214
1215 m.u.port_action_handle.connect_component_handle =
1216 connect_component_handle;
1217 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1218
1219 ret = send_synchronous_mmal_msg(instance, &m,
1220 sizeof(m.u.port_action_handle),
1221 &rmsg, &rmsg_handle);
1222 if (ret)
1223 return ret;
1224
1225 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1226 /* got an unexpected message type in reply */
1227 ret = -EINVAL;
1228 goto release_msg;
1229 }
1230
1231 ret = -rmsg->u.port_action_reply.status;
1232
1233 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
1234 " connect component:0x%x connect port:%d\n",
1235 __func__,
1236 ret, port->component->handle, port->handle,
1237 port_action_type_names[action_type],
1238 action_type, connect_component_handle, connect_port_handle);
1239
1240 release_msg:
1241 vchi_held_msg_release(&rmsg_handle);
1242
1243 return ret;
1244 }
1245
1246 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1247 struct vchiq_mmal_port *port,
1248 u32 parameter_id, void *value, u32 value_size)
1249 {
1250 int ret;
1251 struct mmal_msg m;
1252 struct mmal_msg *rmsg;
1253 VCHI_HELD_MSG_T rmsg_handle;
1254
1255 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1256
1257 m.u.port_parameter_set.component_handle = port->component->handle;
1258 m.u.port_parameter_set.port_handle = port->handle;
1259 m.u.port_parameter_set.id = parameter_id;
1260 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1261 memcpy(&m.u.port_parameter_set.value, value, value_size);
1262
1263 ret = send_synchronous_mmal_msg(instance, &m,
1264 (4 * sizeof(u32)) + value_size,
1265 &rmsg, &rmsg_handle);
1266 if (ret)
1267 return ret;
1268
1269 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1270 /* got an unexpected message type in reply */
1271 ret = -EINVAL;
1272 goto release_msg;
1273 }
1274
1275 ret = -rmsg->u.port_parameter_set_reply.status;
1276
1277 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1278 __func__,
1279 ret, port->component->handle, port->handle, parameter_id);
1280
1281 release_msg:
1282 vchi_held_msg_release(&rmsg_handle);
1283
1284 return ret;
1285 }
1286
/* Query a parameter from a port on the VPU.
 *
 * On entry *value_size is the capacity of the caller's buffer; on exit
 * it holds the parameter's true size as reported by the firmware (which
 * may exceed the capacity, in which case the copy was truncated).
 * Returns 0 on success or a negated MMAL status / -EINVAL on a bad reply.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;
	u32 reply_size;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* requested size includes the two-word id/size header */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_get_reply.status;
	/*
	 * port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	reply_size = rmsg->u.port_parameter_get_reply.size - (2 * sizeof(u32));

	if (ret || (reply_size > *value_size)) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		/* NOTE(review): on the error branch (ret != 0) this still
		 * copies *value_size bytes from the reply; presumably the
		 * firmware zero-fills the value area - confirm before
		 * relying on the contents when ret is non-zero.
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       reply_size);

	/*
	 * Return amount of data copied if big enough,
	 * or wanted if not big enough.
	 */
	*value_size = reply_size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1349
/* disables a port and drains buffers from it */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	/* already disabled - nothing to do */
	if (!port->enabled)
		return 0;

	/* Mark disabled before telling the VPU; NOTE(review): if the
	 * action below fails the port stays flagged disabled even though
	 * the firmware may still consider it enabled - confirm intended.
	 */
	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {

		/* drain all queued buffers on port */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;
			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			/* hand each buffer back to the client as a
			 * zero-length completion with unknown timestamps
			 */
			if (port->buffer_cb)
				port->buffer_cb(instance,
						port, 0, mmalbuf, 0, 0,
						MMAL_TIME_UNKNOWN,
						MMAL_TIME_UNKNOWN);
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* refresh cached port info now the port is disabled */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1389
/* enable a port
 *
 * If the port has a buffer callback, requires that at least
 * port->current_buffer.num buffers are already queued, then submits up
 * to that many buffer headers to the VPU after enabling.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *buf_head;
	int ret;

	/* already enabled - nothing to do */
	if (port->enabled)
		return 0;

	/* ensure there are enough buffers queued to cover the buffer headers */
	if (port->buffer_cb != NULL) {
		hdr_count = 0;
		list_for_each(buf_head, &port->buffers) {
			hdr_count++;
		}
		if (hdr_count < port->current_buffer.num)
			return -ENOSPC;
	}

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		/* hdr_count starts at 1 so the `>` test below submits
		 * exactly current_buffer.num buffers before breaking
		 */
		hdr_count = 1;
		list_for_each(buf_head, &port->buffers) {
			struct mmal_buffer *mmalbuf;
			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* refresh cached port info now the port is enabled */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1440
1441 /* ------------------------------------------------------------------
1442 * Exported API
1443 *------------------------------------------------------------------*/
1444
1445 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1446 struct vchiq_mmal_port *port)
1447 {
1448 int ret;
1449
1450 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1451 return -EINTR;
1452
1453 ret = port_info_set(instance, port);
1454 if (ret)
1455 goto release_unlock;
1456
1457 /* read what has actually been set */
1458 ret = port_info_get(instance, port);
1459
1460 release_unlock:
1461 mutex_unlock(&instance->vchiq_mutex);
1462
1463 return ret;
1464
1465 }
1466
1467 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1468 struct vchiq_mmal_port *port,
1469 u32 parameter, void *value, u32 value_size)
1470 {
1471 int ret;
1472
1473 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1474 return -EINTR;
1475
1476 ret = port_parameter_set(instance, port, parameter, value, value_size);
1477
1478 mutex_unlock(&instance->vchiq_mutex);
1479
1480 return ret;
1481 }
1482
1483 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1484 struct vchiq_mmal_port *port,
1485 u32 parameter, void *value, u32 *value_size)
1486 {
1487 int ret;
1488
1489 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1490 return -EINTR;
1491
1492 ret = port_parameter_get(instance, port, parameter, value, value_size);
1493
1494 mutex_unlock(&instance->vchiq_mutex);
1495
1496 return ret;
1497 }
1498
1499 /* enable a port
1500 *
1501 * enables a port and queues buffers for satisfying callbacks if we
1502 * provide a callback handler
1503 */
1504 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1505 struct vchiq_mmal_port *port,
1506 vchiq_mmal_buffer_cb buffer_cb)
1507 {
1508 int ret;
1509
1510 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1511 return -EINTR;
1512
1513 /* already enabled - noop */
1514 if (port->enabled) {
1515 ret = 0;
1516 goto unlock;
1517 }
1518
1519 port->buffer_cb = buffer_cb;
1520
1521 ret = port_enable(instance, port);
1522
1523 unlock:
1524 mutex_unlock(&instance->vchiq_mutex);
1525
1526 return ret;
1527 }
1528
1529 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1530 struct vchiq_mmal_port *port)
1531 {
1532 int ret;
1533
1534 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1535 return -EINTR;
1536
1537 if (!port->enabled) {
1538 mutex_unlock(&instance->vchiq_mutex);
1539 return 0;
1540 }
1541
1542 ret = port_disable(instance, port);
1543
1544 mutex_unlock(&instance->vchiq_mutex);
1545
1546 return ret;
1547 }
1548
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Tears down any existing connection on src first; passing dst == NULL
 * therefore just disconnects. Otherwise src's format is copied to dst,
 * pushed to the VPU, and the two ports are tunnelled together.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected != NULL) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (dst == NULL) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	/* NOTE(review): only the video-es fields are copied; presumably
	 * these ports always carry video - confirm if audio/subpicture
	 * tunnels are ever needed.
	 */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1635
1636 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1637 struct vchiq_mmal_port *port,
1638 struct mmal_buffer *buffer)
1639 {
1640 unsigned long flags = 0;
1641
1642 spin_lock_irqsave(&port->slock, flags);
1643 list_add_tail(&buffer->list, &port->buffers);
1644 spin_unlock_irqrestore(&port->slock, flags);
1645
1646 /* the port previously underflowed because it was missing a
1647 * mmal_buffer which has just been added, submit that buffer
1648 * to the mmal service.
1649 */
1650 if (port->buffer_underflow) {
1651 port_buffer_from_host(instance, port);
1652 port->buffer_underflow--;
1653 }
1654
1655 return 0;
1656 }
1657
/* Initialise a mmal component and its ports
 *
 * Creates the named component on the VPU, then gathers port info for the
 * control port and every input/output/clock port the firmware reported.
 * On success *component_out points at a slot inside the instance's
 * fixed-size component array. On any failure after creation the VPU
 * component is destroyed again.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx; /* port index */
	struct vchiq_mmal_component *component;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* all slots used - components are never returned to the pool */
	if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}

	component = &instance->component[instance->component_idx];

	ret = create_component(instance, component, name);
	if (ret < 0)
		goto unlock;

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* counts below come from the component-create reply */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	/* slot is only claimed once everything succeeded */
	instance->component_idx++;

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	destroy_component(instance, component);
unlock:
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1741
1742 /*
1743 * cause a mmal component to be destroyed
1744 */
1745 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1746 struct vchiq_mmal_component *component)
1747 {
1748 int ret;
1749
1750 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1751 return -EINTR;
1752
1753 if (component->enabled)
1754 ret = disable_component(instance, component);
1755
1756 ret = destroy_component(instance, component);
1757
1758 mutex_unlock(&instance->vchiq_mutex);
1759
1760 return ret;
1761 }
1762
1763 /*
1764 * cause a mmal component to be enabled
1765 */
1766 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1767 struct vchiq_mmal_component *component)
1768 {
1769 int ret;
1770
1771 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1772 return -EINTR;
1773
1774 if (component->enabled) {
1775 mutex_unlock(&instance->vchiq_mutex);
1776 return 0;
1777 }
1778
1779 ret = enable_component(instance, component);
1780 if (ret == 0)
1781 component->enabled = true;
1782
1783 mutex_unlock(&instance->vchiq_mutex);
1784
1785 return ret;
1786 }
1787
1788 /*
1789 * cause a mmal component to be enabled
1790 */
1791 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1792 struct vchiq_mmal_component *component)
1793 {
1794 int ret;
1795
1796 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1797 return -EINTR;
1798
1799 if (!component->enabled) {
1800 mutex_unlock(&instance->vchiq_mutex);
1801 return 0;
1802 }
1803
1804 ret = disable_component(instance, component);
1805 if (ret == 0)
1806 component->enabled = false;
1807
1808 mutex_unlock(&instance->vchiq_mutex);
1809
1810 return ret;
1811 }
1812
1813 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1814 u32 *major_out, u32 *minor_out)
1815 {
1816 int ret;
1817
1818 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1819 return -EINTR;
1820
1821 ret = get_version(instance, major_out, minor_out);
1822
1823 mutex_unlock(&instance->vchiq_mutex);
1824
1825 return ret;
1826 }
1827
1828 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1829 {
1830 int status = 0;
1831
1832 if (instance == NULL)
1833 return -EINVAL;
1834
1835 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1836 return -EINTR;
1837
1838 vchi_service_use(instance->handle);
1839
1840 status = vchi_service_close(instance->handle);
1841 if (status != 0)
1842 pr_err("mmal-vchiq: VCHIQ close failed");
1843
1844 mutex_unlock(&instance->vchiq_mutex);
1845
1846 vfree(instance->bulk_scratch);
1847
1848 kfree(instance);
1849
1850 return status;
1851 }
1852
1853 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1854 {
1855 int status;
1856 struct vchiq_mmal_instance *instance;
1857 static VCHI_CONNECTION_T *vchi_connection;
1858 static VCHI_INSTANCE_T vchi_instance;
1859 SERVICE_CREATION_T params = {
1860 VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1861 VC_MMAL_SERVER_NAME,
1862 vchi_connection,
1863 0, /* rx fifo size (unused) */
1864 0, /* tx fifo size (unused) */
1865 service_callback,
1866 NULL, /* service callback parameter */
1867 1, /* unaligned bulk receives */
1868 1, /* unaligned bulk transmits */
1869 0 /* want crc check on bulk transfers */
1870 };
1871
1872 /* compile time checks to ensure structure size as they are
1873 * directly (de)serialised from memory.
1874 */
1875
1876 /* ensure the header structure has packed to the correct size */
1877 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1878
1879 /* ensure message structure does not exceed maximum length */
1880 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1881
1882 /* mmal port struct is correct size */
1883 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1884
1885 /* create a vchi instance */
1886 status = vchi_initialise(&vchi_instance);
1887 if (status) {
1888 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1889 status);
1890 return -EIO;
1891 }
1892
1893 status = vchi_connect(NULL, 0, vchi_instance);
1894 if (status) {
1895 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1896 return -EIO;
1897 }
1898
1899 instance = kmalloc(sizeof(*instance), GFP_KERNEL);
1900 memset(instance, 0, sizeof(*instance));
1901
1902 mutex_init(&instance->vchiq_mutex);
1903 mutex_init(&instance->bulk_mutex);
1904
1905 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1906
1907 params.callback_param = instance;
1908
1909 status = vchi_service_open(vchi_instance, &params, &instance->handle);
1910 if (status) {
1911 pr_err("Failed to open VCHI service connection (status=%d)\n",
1912 status);
1913 goto err_close_services;
1914 }
1915
1916 vchi_service_release(instance->handle);
1917
1918 *out_instance = instance;
1919
1920 return 0;
1921
1922 err_close_services:
1923
1924 vchi_service_close(instance->handle);
1925 vfree(instance->bulk_scratch);
1926 kfree(instance);
1927 return -ENODEV;
1928 }