2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/cdev.h>
41 #include <linux/device.h>
43 #include <linux/highmem.h>
44 #include <linux/pagemap.h>
45 #include <linux/bug.h>
46 #include <linux/semaphore.h>
47 #include <linux/list.h>
49 #include <linux/platform_device.h>
50 #include <soc/bcm2835/raspberrypi-firmware.h>
52 #include "vchiq_core.h"
53 #include "vchiq_ioctl.h"
54 #include "vchiq_arm.h"
55 #include "vchiq_debugfs.h"
56 #include "vchiq_killable.h"
58 #define DEVICE_NAME "vchiq"
60 /* Override the default prefix, which would be vchiq_arm (from the filename) */
61 #undef MODULE_PARAM_PREFIX
62 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
66 /* Some per-instance constants */
67 #define MAX_COMPLETIONS 16
68 #define MAX_SERVICES 64
69 #define MAX_ELEMENTS 8
70 #define MSG_QUEUE_SIZE 64
72 #define KEEPALIVE_VER 1
73 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
75 /* Run time control of log level, based on KERN_XXX level. */
76 int vchiq_arm_log_level
= VCHIQ_LOG_DEFAULT
;
77 int vchiq_susp_log_level
= VCHIQ_LOG_ERROR
;
/* Timeouts (ms) for the videocore suspend state-machine timers. */
79 #define SUSPEND_TIMER_TIMEOUT_MS 100
80 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
/* Human-readable names for the VC_SUSPEND_* states; indexed by
 * state + VC_SUSPEND_NUM_OFFSET (the offset accounts for the negative
 * pre-idle state values).
 * NOTE(review): some array entries and the closing brace appear to be
 * missing from this listing (the embedded numbering jumps 85 -> 88) --
 * verify the full table against the upstream vchiq_arm.c before use.
 */
82 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
83 static const char *const suspend_state_names
[] = {
84 "VC_SUSPEND_FORCE_CANCELED",
85 "VC_SUSPEND_REJECTED",
88 "VC_SUSPEND_REQUESTED",
89 "VC_SUSPEND_IN_PROGRESS",
90 "VC_SUSPEND_SUSPENDED"
/* Human-readable names for the VC_RESUME_* states; indexed by
 * state + VC_RESUME_NUM_OFFSET.
 * NOTE(review): entries before "VC_RESUME_REQUESTED" and the array
 * terminator appear to be missing here as well -- confirm upstream.
 */
92 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
93 static const char *const resume_state_names
[] = {
96 "VC_RESUME_REQUESTED",
97 "VC_RESUME_IN_PROGRESS",
100 /* The number of times we allow force suspend to timeout before actually
101 ** _forcing_ suspend. This is to cater for SW which fails to release vchiq
102 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
104 #define FORCE_SUSPEND_FAIL_MAX 8
106 /* The time in ms allowed for videocore to go idle when force suspend has been
108 #define FORCE_SUSPEND_TIMEOUT_MS 200
111 static void suspend_timer_callback(unsigned long context
);
/* Per-open-service user-side state, stored in the service's
 * base.userdata. Holds the bounce queue used to hand messages from
 * service_callback (VCHI-style services) to the DEQUEUE_MESSAGE ioctl.
 * NOTE(review): several fields visible in use elsewhere in this file
 * (userdata, is_vchi, close_pending, msg_insert, msg_remove) are not
 * shown in this listing -- the extraction appears to have dropped lines.
 */
114 typedef struct user_service_struct
{
115 VCHIQ_SERVICE_T
*service
;
117 VCHIQ_INSTANCE_T instance
;
/* Set while a DEQUEUE_MESSAGE ioctl is waiting; lets the callback
 * bypass the completion queue. */
119 char dequeue_pending
;
/* Completion-queue position of the last MESSAGE_AVAILABLE inserted
 * for this service (compared against instance->completion_remove). */
121 int message_available_pos
;
124 struct semaphore insert_event
;
125 struct semaphore remove_event
;
126 struct semaphore close_event
;
/* Circular message queue; indices wrap with & (MSG_QUEUE_SIZE - 1),
 * protected by msg_queue_spinlock. */
127 VCHIQ_HEADER_T
* msg_queue
[MSG_QUEUE_SIZE
];
/* A bulk_waiter plus list linkage so an interrupted blocking bulk
 * transfer can be parked on instance->bulk_waiter_list and resumed by
 * the same process via VCHIQ_BULK_MODE_WAITING (matched by pid).
 * NOTE(review): the pid field referenced by the ioctl code and the
 * closing brace are not shown in this listing.
 */
130 struct bulk_waiter_node
{
131 struct bulk_waiter bulk_waiter
;
133 struct list_head list
;
/* Per-open-file driver instance (file->private_data). Owns the
 * circular completion queue consumed by the AWAIT_COMPLETION ioctl and
 * the list of parked bulk waiters.
 * NOTE(review): fields used elsewhere in this file (pid, connected,
 * closing) and the closing brace are missing from this listing.
 */
136 struct vchiq_instance_struct
{
137 VCHIQ_STATE_T
*state
;
/* Circular completion queue; indices wrap with & (MAX_COMPLETIONS - 1),
 * guarded by completion_mutex plus insert_event/remove_event. */
138 VCHIQ_COMPLETION_DATA_T completions
[MAX_COMPLETIONS
];
139 int completion_insert
;
140 int completion_remove
;
141 struct semaphore insert_event
;
142 struct semaphore remove_event
;
143 struct mutex completion_mutex
;
/* Non-zero once the client negotiated VCHIQ_VERSION_CLOSE_DELIVERED;
 * closed-service references are then held until CLOSE_DELIVERED. */
149 int use_close_delivered
;
152 struct list_head bulk_waiter_list
;
153 struct mutex bulk_waiter_list_mutex
;
155 VCHIQ_DEBUGFS_NODE_T debugfs_node
;
158 typedef struct dump_context_struct
{
165 static struct cdev vchiq_cdev
;
166 static dev_t vchiq_devid
;
167 static VCHIQ_STATE_T g_state
;
168 static struct class *vchiq_class
;
169 static struct device
*vchiq_dev
;
170 static DEFINE_SPINLOCK(msg_queue_spinlock
);
/* ioctl command names for trace logging, indexed by _IOC_NR(cmd).
 * NOTE(review): most entries are missing from this listing (numbering
 * jumps 172 -> 178 -> 187); the static assert below requires exactly
 * VCHIQ_IOC_MAX + 1 entries, so restore the full table from upstream.
 */
172 static const char *const ioctl_names
[] = {
178 "QUEUE_BULK_TRANSMIT",
179 "QUEUE_BULK_RECEIVE",
187 "SET_SERVICE_OPTION",
/* Compile-time check that the name table tracks the ioctl list. */
193 vchiq_static_assert(ARRAY_SIZE(ioctl_names
) ==
194 (VCHIQ_IOC_MAX
+ 1));
/* Forward declaration for the DUMP_PHYS_MEM ioctl helper. */
197 dump_phys_mem(void *virt_addr
, uint32_t num_bytes
);
199 /****************************************************************************
203 ***************************************************************************/
/* Append one completion record to instance->completions for delivery to
 * user space via the AWAIT_COMPLETION ioctl. Blocks (interruptibly) on
 * instance->remove_event while the circular queue is full.
 * NOTE(review): this listing has dropped lines (error returns in the
 * full-queue loop, the "completion = ..." assignment head, the write
 * barrier, braces) -- numbering jumps e.g. 222 -> 224, 247 -> 250.
 */
205 static VCHIQ_STATUS_T
206 add_completion(VCHIQ_INSTANCE_T instance
, VCHIQ_REASON_T reason
,
207 VCHIQ_HEADER_T
*header
, USER_SERVICE_T
*user_service
,
210 VCHIQ_COMPLETION_DATA_T
*completion
;
211 DEBUG_INITIALISE(g_state
.local
)
/* Wait for space: queue is full when insert has lapped remove by
 * MAX_COMPLETIONS. */
213 while (instance
->completion_insert
==
214 (instance
->completion_remove
+ MAX_COMPLETIONS
)) {
215 /* Out of space - wait for the client */
216 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
217 vchiq_log_trace(vchiq_arm_log_level
,
218 "add_completion - completion queue full");
219 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT
);
220 if (down_interruptible(&instance
->remove_event
) != 0) {
221 vchiq_log_info(vchiq_arm_log_level
,
222 "service_callback interrupted");
224 } else if (instance
->closing
) {
225 vchiq_log_info(vchiq_arm_log_level
,
226 "service_callback closing");
229 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
/* Fill the slot at completion_insert (index wraps with the mask). */
233 &instance
->completions
[instance
->completion_insert
&
234 (MAX_COMPLETIONS
- 1)];
236 completion
->header
= header
;
237 completion
->reason
= reason
;
238 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
239 completion
->service_userdata
= user_service
->service
;
240 completion
->bulk_userdata
= bulk_userdata
;
242 if (reason
== VCHIQ_SERVICE_CLOSED
) {
243 /* Take an extra reference, to be held until
244 this CLOSED notification is delivered. */
245 lock_service(user_service
->service
);
246 if (instance
->use_close_delivered
)
247 user_service
->close_pending
= 1;
250 /* A write barrier is needed here to ensure that the entire completion
251 record is written out before the insert point. */
/* Record where a MESSAGE_AVAILABLE sits so service_callback can tell
 * whether one is still pending in the queue. */
254 if (reason
== VCHIQ_MESSAGE_AVAILABLE
)
255 user_service
->message_available_pos
=
256 instance
->completion_insert
;
257 instance
->completion_insert
++;
/* Wake any AWAIT_COMPLETION waiter. */
259 up(&instance
->insert_event
);
261 return VCHIQ_SUCCESS
;
264 /****************************************************************************
268 ***************************************************************************/
/* VCHIQ callback for all services created through this device. Routes
 * events to the owning instance's completion queue; for VCHI-style
 * services, message headers are additionally parked in the per-service
 * msg_queue for the DEQUEUE_MESSAGE ioctl, and the completion queue is
 * bypassed when a dequeuer is already waiting.
 * NOTE(review): lines are missing from this listing (returns in the
 * full-queue paths, closing braces, the final bulk_userdata argument) --
 * numbering jumps e.g. 323 -> 328, 361 -> 366.
 */
270 static VCHIQ_STATUS_T
271 service_callback(VCHIQ_REASON_T reason
, VCHIQ_HEADER_T
*header
,
272 VCHIQ_SERVICE_HANDLE_T handle
, void *bulk_userdata
)
274 /* How do we ensure the callback goes to the right client?
275 ** The service_user data points to a USER_SERVICE_T record containing
276 ** the original callback and the user state structure, which contains a
277 ** circular buffer for completion records.
279 USER_SERVICE_T
*user_service
;
280 VCHIQ_SERVICE_T
*service
;
281 VCHIQ_INSTANCE_T instance
;
282 DEBUG_INITIALISE(g_state
.local
)
284 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
286 service
= handle_to_service(handle
);
288 user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
289 instance
= user_service
->instance
;
/* Instance gone or tearing down: drop the event silently. */
291 if (!instance
|| instance
->closing
)
292 return VCHIQ_SUCCESS
;
294 vchiq_log_trace(vchiq_arm_log_level
,
295 "service_callback - service %lx(%d,%p), reason %d, header %lx, "
296 "instance %lx, bulk_userdata %lx",
297 (unsigned long)user_service
,
298 service
->localport
, user_service
->userdata
,
299 reason
, (unsigned long)header
,
300 (unsigned long)instance
, (unsigned long)bulk_userdata
);
/* VCHI-style service with a message: park the header in msg_queue. */
302 if (header
&& user_service
->is_vchi
) {
303 spin_lock(&msg_queue_spinlock
);
304 while (user_service
->msg_insert
==
305 (user_service
->msg_remove
+ MSG_QUEUE_SIZE
)) {
306 spin_unlock(&msg_queue_spinlock
);
307 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
308 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT
);
309 vchiq_log_trace(vchiq_arm_log_level
,
310 "service_callback - msg queue full");
311 /* If there is no MESSAGE_AVAILABLE in the completion
314 if ((user_service
->message_available_pos
-
315 instance
->completion_remove
) < 0) {
316 VCHIQ_STATUS_T status
;
317 vchiq_log_info(vchiq_arm_log_level
,
318 "Inserting extra MESSAGE_AVAILABLE");
319 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
/* NULL header: signal availability without consuming the slot. */
320 status
= add_completion(instance
, reason
,
321 NULL
, user_service
, bulk_userdata
);
322 if (status
!= VCHIQ_SUCCESS
) {
323 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
328 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
/* Wait for the dequeuer to make room. */
329 if (down_interruptible(&user_service
->remove_event
)
331 vchiq_log_info(vchiq_arm_log_level
,
332 "service_callback interrupted");
333 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
335 } else if (instance
->closing
) {
336 vchiq_log_info(vchiq_arm_log_level
,
337 "service_callback closing");
338 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
341 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
342 spin_lock(&msg_queue_spinlock
);
/* Store the header and publish it to the dequeuer. */
345 user_service
->msg_queue
[user_service
->msg_insert
&
346 (MSG_QUEUE_SIZE
- 1)] = header
;
347 user_service
->msg_insert
++;
348 spin_unlock(&msg_queue_spinlock
);
350 up(&user_service
->insert_event
);
352 /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
353 ** there is a MESSAGE_AVAILABLE in the completion queue then
354 ** bypass the completion queue.
356 if (((user_service
->message_available_pos
-
357 instance
->completion_remove
) >= 0) ||
358 user_service
->dequeue_pending
) {
359 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
360 user_service
->dequeue_pending
= 0;
361 return VCHIQ_SUCCESS
;
366 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
/* Everything else goes through the per-instance completion queue. */
368 return add_completion(instance
, reason
, header
, user_service
,
372 /****************************************************************************
376 ***************************************************************************/
/* Destructor passed to vchiq_add_service_internal for the USER_SERVICE_T
 * allocated in VCHIQ_IOC_CREATE_SERVICE.
 * NOTE(review): the return type line and the function body are missing
 * from this listing -- presumably it frees userdata; confirm upstream.
 */
378 user_service_free(void *userdata
)
383 /****************************************************************************
387 ***************************************************************************/
388 static void close_delivered(USER_SERVICE_T
*user_service
)
390 vchiq_log_info(vchiq_arm_log_level
,
391 "close_delivered(handle=%x)",
392 user_service
->service
->handle
);
394 if (user_service
->close_pending
) {
395 /* Allow the underlying service to be culled */
396 unlock_service(user_service
->service
);
398 /* Wake the user-thread blocked in close_ or remove_service */
399 up(&user_service
->close_event
);
401 user_service
->close_pending
= 0;
405 struct vchiq_io_copy_callback_context
{
406 VCHIQ_ELEMENT_T
*current_element
;
407 size_t current_element_offset
;
408 unsigned long elements_to_go
;
409 size_t current_offset
;
/* Copy callback handed to vchiq_queue_message(): pulls the next chunk of
 * payload out of the caller's element array (user space) into the slot
 * buffer, advancing the cursor in *context across element boundaries and
 * skipping zero-size elements.
 * NOTE(review): the parameter list, several error returns (offset
 * mismatch, exhausted elements, failed copy_from_user) and braces are
 * missing from this listing -- numbering jumps e.g. 424 -> 427, 454 -> 460.
 */
413 vchiq_ioc_copy_element_data(
420 size_t bytes_this_round
;
421 struct vchiq_io_copy_callback_context
*copy_context
=
422 (struct vchiq_io_copy_callback_context
*)context
;
/* The core always asks for sequential offsets; anything else is an
 * internal error. */
424 if (offset
!= copy_context
->current_offset
)
427 if (!copy_context
->elements_to_go
)
431 * Complex logic here to handle the case of 0 size elements
432 * in the middle of the array of elements.
434 * Need to skip over these 0 size elements.
437 bytes_this_round
= min(copy_context
->current_element
->size
-
438 copy_context
->current_element_offset
,
441 if (bytes_this_round
)
/* Zero-size element: step past it and retry. */
444 copy_context
->elements_to_go
--;
445 copy_context
->current_element
++;
446 copy_context
->current_element_offset
= 0;
448 if (!copy_context
->elements_to_go
)
/* Pull this chunk from the user-space element data. */
452 res
= copy_from_user(dest
,
453 copy_context
->current_element
->data
+
454 copy_context
->current_element_offset
,
460 copy_context
->current_element_offset
+= bytes_this_round
;
461 copy_context
->current_offset
+= bytes_this_round
;
464 * Check if done with current element, and if so advance to the next.
466 if (copy_context
->current_element_offset
==
467 copy_context
->current_element
->size
) {
468 copy_context
->elements_to_go
--;
469 copy_context
->current_element
++;
470 copy_context
->current_element_offset
= 0;
473 return bytes_this_round
;
476 /**************************************************************************
478 * vchiq_ioc_queue_message
480 **************************************************************************/
/* Validate a user-supplied element array, total its sizes, and queue the
 * message through vchiq_queue_message() using the element-copy callback
 * above (so payload is copied straight from user space into the slot).
 * NOTE(review): the count parameter line, the loop index declaration and
 * the error return inside the validation loop are missing from this
 * listing (numbering jumps 483 -> 486, 496 -> 499).
 */
481 static VCHIQ_STATUS_T
482 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle
,
483 VCHIQ_ELEMENT_T
*elements
,
486 struct vchiq_io_copy_callback_context context
;
488 size_t total_size
= 0;
490 context
.current_element
= elements
;
491 context
.current_element_offset
= 0;
492 context
.elements_to_go
= count
;
493 context
.current_offset
= 0;
/* Reject elements with a NULL data pointer but non-zero size, and sum
 * the payload length. */
495 for (i
= 0; i
< count
; i
++) {
496 if (!elements
[i
].data
&& elements
[i
].size
!= 0)
499 total_size
+= elements
[i
].size
;
502 return vchiq_queue_message(handle
, vchiq_ioc_copy_element_data
,
503 &context
, total_size
);
506 /****************************************************************************
510 ***************************************************************************/
/* The vchiq character-device ioctl dispatcher: one big switch over the
 * VCHIQ_IOC_* commands, operating on the per-file instance.
 * NOTE(review): this listing is heavily truncated -- the switch header,
 * break statements, many error paths, unlock/return tails and braces are
 * missing throughout (the embedded numbering jumps repeatedly). Treat it
 * as a fragment and compare against the upstream vchiq_arm.c.
 */
512 vchiq_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
514 VCHIQ_INSTANCE_T instance
= file
->private_data
;
515 VCHIQ_STATUS_T status
= VCHIQ_SUCCESS
;
516 VCHIQ_SERVICE_T
*service
= NULL
;
519 DEBUG_INITIALISE(g_state
.local
)
521 vchiq_log_trace(vchiq_arm_log_level
,
522 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
524 ((_IOC_TYPE(cmd
) == VCHIQ_IOC_MAGIC
) &&
525 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
)) ?
526 ioctl_names
[_IOC_NR(cmd
)] : "<invalid>", arg
);
/* SHUTDOWN: remove every service on this instance, then ask the
 * completion thread to exit. */
529 case VCHIQ_IOC_SHUTDOWN
:
530 if (!instance
->connected
)
533 /* Remove all services */
535 while ((service
= next_service_by_instance(instance
->state
,
536 instance
, &i
)) != NULL
) {
537 status
= vchiq_remove_service(service
->handle
);
538 unlock_service(service
);
539 if (status
!= VCHIQ_SUCCESS
)
544 if (status
== VCHIQ_SUCCESS
) {
545 /* Wake the completion thread and ask it to exit */
546 instance
->closing
= 1;
547 up(&instance
->insert_event
);
/* CONNECT: perform the one-time connect handshake with videocore. */
552 case VCHIQ_IOC_CONNECT
:
553 if (instance
->connected
) {
557 rc
= mutex_lock_killable(&instance
->state
->mutex
);
559 vchiq_log_error(vchiq_arm_log_level
,
560 "vchiq: connect: could not lock mutex for "
562 instance
->state
->id
, rc
);
566 status
= vchiq_connect_internal(instance
->state
, instance
);
567 mutex_unlock(&instance
->state
->mutex
);
569 if (status
== VCHIQ_SUCCESS
)
570 instance
->connected
= 1;
572 vchiq_log_error(vchiq_arm_log_level
,
573 "vchiq: could not connect: %d", status
);
/* CREATE_SERVICE: allocate a USER_SERVICE_T, add (and possibly open)
 * the service, and copy the resulting handle back to user space. */
576 case VCHIQ_IOC_CREATE_SERVICE
: {
577 VCHIQ_CREATE_SERVICE_T args
;
578 USER_SERVICE_T
*user_service
= NULL
;
583 (&args
, (const void __user
*)arg
,
584 sizeof(args
)) != 0) {
589 user_service
= kmalloc(sizeof(USER_SERVICE_T
), GFP_KERNEL
);
596 if (!instance
->connected
) {
601 srvstate
= VCHIQ_SRVSTATE_OPENING
;
604 instance
->connected
?
605 VCHIQ_SRVSTATE_LISTENING
:
606 VCHIQ_SRVSTATE_HIDDEN
;
/* Interpose service_callback; the caller's userdata is kept on the
 * USER_SERVICE_T instead. */
609 userdata
= args
.params
.userdata
;
610 args
.params
.callback
= service_callback
;
611 args
.params
.userdata
= user_service
;
612 service
= vchiq_add_service_internal(
614 &args
.params
, srvstate
,
615 instance
, user_service_free
);
617 if (service
!= NULL
) {
618 user_service
->service
= service
;
619 user_service
->userdata
= userdata
;
620 user_service
->instance
= instance
;
621 user_service
->is_vchi
= (args
.is_vchi
!= 0);
622 user_service
->dequeue_pending
= 0;
623 user_service
->close_pending
= 0;
624 user_service
->message_available_pos
=
625 instance
->completion_remove
- 1;
626 user_service
->msg_insert
= 0;
627 user_service
->msg_remove
= 0;
628 sema_init(&user_service
->insert_event
, 0);
629 sema_init(&user_service
->remove_event
, 0);
630 sema_init(&user_service
->close_event
, 0);
633 status
= vchiq_open_service_internal
634 (service
, instance
->pid
);
635 if (status
!= VCHIQ_SUCCESS
) {
636 vchiq_remove_service(service
->handle
);
638 ret
= (status
== VCHIQ_RETRY
) ?
644 if (copy_to_user((void __user
*)
645 &(((VCHIQ_CREATE_SERVICE_T __user
*)
647 (const void *)&service
->handle
,
648 sizeof(service
->handle
)) != 0) {
650 vchiq_remove_service(service
->handle
);
/* CLOSE_SERVICE: close, then wait for the CLOSED completion to be
 * delivered (close_event) before returning. */
660 case VCHIQ_IOC_CLOSE_SERVICE
: {
661 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
663 service
= find_service_for_instance(instance
, handle
);
664 if (service
!= NULL
) {
665 USER_SERVICE_T
*user_service
=
666 (USER_SERVICE_T
*)service
->base
.userdata
;
667 /* close_pending is false on first entry, and when the
668 wait in vchiq_close_service has been interrupted. */
669 if (!user_service
->close_pending
) {
670 status
= vchiq_close_service(service
->handle
);
671 if (status
!= VCHIQ_SUCCESS
)
675 /* close_pending is true once the underlying service
676 has been closed until the client library calls the
677 CLOSE_DELIVERED ioctl, signalling close_event. */
678 if (user_service
->close_pending
&&
679 down_interruptible(&user_service
->close_event
))
680 status
= VCHIQ_RETRY
;
/* REMOVE_SERVICE: same shape as CLOSE_SERVICE but removes. */
686 case VCHIQ_IOC_REMOVE_SERVICE
: {
687 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
689 service
= find_service_for_instance(instance
, handle
);
690 if (service
!= NULL
) {
691 USER_SERVICE_T
*user_service
=
692 (USER_SERVICE_T
*)service
->base
.userdata
;
693 /* close_pending is false on first entry, and when the
694 wait in vchiq_close_service has been interrupted. */
695 if (!user_service
->close_pending
) {
696 status
= vchiq_remove_service(service
->handle
);
697 if (status
!= VCHIQ_SUCCESS
)
701 /* close_pending is true once the underlying service
702 has been closed until the client library calls the
703 CLOSE_DELIVERED ioctl, signalling close_event. */
704 if (user_service
->close_pending
&&
705 down_interruptible(&user_service
->close_event
))
706 status
= VCHIQ_RETRY
;
/* USE/RELEASE_SERVICE: adjust the service's use count (suspend/resume
 * accounting). */
712 case VCHIQ_IOC_USE_SERVICE
:
713 case VCHIQ_IOC_RELEASE_SERVICE
: {
714 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
716 service
= find_service_for_instance(instance
, handle
);
717 if (service
!= NULL
) {
718 status
= (cmd
== VCHIQ_IOC_USE_SERVICE
) ?
719 vchiq_use_service_internal(service
) :
720 vchiq_release_service_internal(service
);
721 if (status
!= VCHIQ_SUCCESS
) {
722 vchiq_log_error(vchiq_susp_log_level
,
723 "%s: cmd %s returned error %d for "
724 "service %c%c%c%c:%03d",
726 (cmd
== VCHIQ_IOC_USE_SERVICE
) ?
727 "VCHIQ_IOC_USE_SERVICE" :
728 "VCHIQ_IOC_RELEASE_SERVICE",
730 VCHIQ_FOURCC_AS_4CHARS(
731 service
->base
.fourcc
),
/* QUEUE_MESSAGE: bounded copy of the element descriptors, then queue
 * via vchiq_ioc_queue_message(). */
739 case VCHIQ_IOC_QUEUE_MESSAGE
: {
740 VCHIQ_QUEUE_MESSAGE_T args
;
742 (&args
, (const void __user
*)arg
,
743 sizeof(args
)) != 0) {
748 service
= find_service_for_instance(instance
, args
.handle
);
750 if ((service
!= NULL
) && (args
.count
<= MAX_ELEMENTS
)) {
751 /* Copy elements into kernel space */
752 VCHIQ_ELEMENT_T elements
[MAX_ELEMENTS
];
753 if (copy_from_user(elements
, args
.elements
,
754 args
.count
* sizeof(VCHIQ_ELEMENT_T
)) == 0)
755 status
= vchiq_ioc_queue_message
757 elements
, args
.count
);
/* QUEUE_BULK_TRANSMIT/RECEIVE: set up (or recover) a bulk_waiter and
 * start the transfer; an interrupted blocking transfer is parked on
 * bulk_waiter_list keyed by pid for a later MODE_WAITING retry. */
765 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT
:
766 case VCHIQ_IOC_QUEUE_BULK_RECEIVE
: {
767 VCHIQ_QUEUE_BULK_TRANSFER_T args
;
768 struct bulk_waiter_node
*waiter
= NULL
;
769 VCHIQ_BULK_DIR_T dir
=
770 (cmd
== VCHIQ_IOC_QUEUE_BULK_TRANSMIT
) ?
771 VCHIQ_BULK_TRANSMIT
: VCHIQ_BULK_RECEIVE
;
774 (&args
, (const void __user
*)arg
,
775 sizeof(args
)) != 0) {
780 service
= find_service_for_instance(instance
, args
.handle
);
786 if (args
.mode
== VCHIQ_BULK_MODE_BLOCKING
) {
787 waiter
= kzalloc(sizeof(struct bulk_waiter_node
),
793 args
.userdata
= &waiter
->bulk_waiter
;
794 } else if (args
.mode
== VCHIQ_BULK_MODE_WAITING
) {
795 struct list_head
*pos
;
796 mutex_lock(&instance
->bulk_waiter_list_mutex
);
797 list_for_each(pos
, &instance
->bulk_waiter_list
) {
798 if (list_entry(pos
, struct bulk_waiter_node
,
799 list
)->pid
== current
->pid
) {
800 waiter
= list_entry(pos
,
801 struct bulk_waiter_node
,
808 mutex_unlock(&instance
->bulk_waiter_list_mutex
);
810 vchiq_log_error(vchiq_arm_log_level
,
811 "no bulk_waiter found for pid %d",
816 vchiq_log_info(vchiq_arm_log_level
,
817 "found bulk_waiter %pK for pid %d", waiter
,
819 args
.userdata
= &waiter
->bulk_waiter
;
821 status
= vchiq_bulk_transfer
823 VCHI_MEM_HANDLE_INVALID
,
824 args
.data
, args
.size
,
825 args
.userdata
, args
.mode
,
829 if ((status
!= VCHIQ_RETRY
) || fatal_signal_pending(current
) ||
830 !waiter
->bulk_waiter
.bulk
) {
831 if (waiter
->bulk_waiter
.bulk
) {
832 /* Cancel the signal when the transfer
834 spin_lock(&bulk_waiter_spinlock
);
835 waiter
->bulk_waiter
.bulk
->userdata
= NULL
;
836 spin_unlock(&bulk_waiter_spinlock
);
840 const VCHIQ_BULK_MODE_T mode_waiting
=
841 VCHIQ_BULK_MODE_WAITING
;
842 waiter
->pid
= current
->pid
;
843 mutex_lock(&instance
->bulk_waiter_list_mutex
);
844 list_add(&waiter
->list
, &instance
->bulk_waiter_list
);
845 mutex_unlock(&instance
->bulk_waiter_list_mutex
);
846 vchiq_log_info(vchiq_arm_log_level
,
847 "saved bulk_waiter %pK for pid %d",
848 waiter
, current
->pid
);
850 if (copy_to_user((void __user
*)
851 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user
*)
853 (const void *)&mode_waiting
,
854 sizeof(mode_waiting
)) != 0)
/* AWAIT_COMPLETION: block until completions are available, then drain
 * up to args.count records (copying message bodies into the caller's
 * msgbufs) back to user space. */
859 case VCHIQ_IOC_AWAIT_COMPLETION
: {
860 VCHIQ_AWAIT_COMPLETION_T args
;
862 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
863 if (!instance
->connected
) {
868 if (copy_from_user(&args
, (const void __user
*)arg
,
869 sizeof(args
)) != 0) {
874 mutex_lock(&instance
->completion_mutex
);
876 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
877 while ((instance
->completion_remove
==
878 instance
->completion_insert
)
879 && !instance
->closing
) {
881 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
/* Drop the mutex while sleeping so producers can insert. */
882 mutex_unlock(&instance
->completion_mutex
);
883 rc
= down_interruptible(&instance
->insert_event
);
884 mutex_lock(&instance
->completion_mutex
);
886 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
887 vchiq_log_info(vchiq_arm_log_level
,
888 "AWAIT_COMPLETION interrupted");
893 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
895 /* A read memory barrier is needed to stop prefetch of a stale
901 int msgbufcount
= args
.msgbufcount
;
902 for (ret
= 0; ret
< args
.count
; ret
++) {
903 VCHIQ_COMPLETION_DATA_T
*completion
;
904 VCHIQ_SERVICE_T
*service
;
905 USER_SERVICE_T
*user_service
;
906 VCHIQ_HEADER_T
*header
;
907 if (instance
->completion_remove
==
908 instance
->completion_insert
)
910 completion
= &instance
->completions
[
911 instance
->completion_remove
&
912 (MAX_COMPLETIONS
- 1)];
/* Swap the kernel service pointer for the caller's userdata before
 * the record reaches user space. */
914 service
= completion
->service_userdata
;
915 user_service
= service
->base
.userdata
;
916 completion
->service_userdata
=
917 user_service
->userdata
;
919 header
= completion
->header
;
924 msglen
= header
->size
+
925 sizeof(VCHIQ_HEADER_T
);
926 /* This must be a VCHIQ-style service */
927 if (args
.msgbufsize
< msglen
) {
930 "header %pK: msgbufsize %x < msglen %x",
931 header
, args
.msgbufsize
,
933 WARN(1, "invalid message "
939 if (msgbufcount
<= 0)
940 /* Stall here for lack of a
941 ** buffer for the message. */
943 /* Get the pointer from user space */
945 if (copy_from_user(&msgbuf
,
946 (const void __user
*)
947 &args
.msgbufs
[msgbufcount
],
948 sizeof(msgbuf
)) != 0) {
954 /* Copy the message to user space */
955 if (copy_to_user(msgbuf
, header
,
962 /* Now it has been copied, the message
963 ** can be released. */
964 vchiq_release_message(service
->handle
,
967 /* The completion must point to the
969 completion
->header
= msgbuf
;
/* Without close_delivered support, drop the CLOSED reference now. */
972 if ((completion
->reason
==
973 VCHIQ_SERVICE_CLOSED
) &&
974 !instance
->use_close_delivered
)
975 unlock_service(service
);
977 if (copy_to_user((void __user
*)(
979 ret
* sizeof(VCHIQ_COMPLETION_DATA_T
)),
981 sizeof(VCHIQ_COMPLETION_DATA_T
)) != 0) {
987 instance
->completion_remove
++;
990 if (msgbufcount
!= args
.msgbufcount
) {
991 if (copy_to_user((void __user
*)
992 &((VCHIQ_AWAIT_COMPLETION_T
*)arg
)->
995 sizeof(msgbufcount
)) != 0) {
1002 up(&instance
->remove_event
);
1003 mutex_unlock(&instance
->completion_mutex
);
1004 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
/* DEQUEUE_MESSAGE: pop one header from the per-service msg_queue
 * (optionally blocking), copying the body to args.buf. */
1007 case VCHIQ_IOC_DEQUEUE_MESSAGE
: {
1008 VCHIQ_DEQUEUE_MESSAGE_T args
;
1009 USER_SERVICE_T
*user_service
;
1010 VCHIQ_HEADER_T
*header
;
1012 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1014 (&args
, (const void __user
*)arg
,
1015 sizeof(args
)) != 0) {
1019 service
= find_service_for_instance(instance
, args
.handle
);
1024 user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
1025 if (user_service
->is_vchi
== 0) {
1030 spin_lock(&msg_queue_spinlock
);
1031 if (user_service
->msg_remove
== user_service
->msg_insert
) {
1032 if (!args
.blocking
) {
1033 spin_unlock(&msg_queue_spinlock
);
1034 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1038 user_service
->dequeue_pending
= 1;
1040 spin_unlock(&msg_queue_spinlock
);
1041 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1042 if (down_interruptible(
1043 &user_service
->insert_event
) != 0) {
1044 vchiq_log_info(vchiq_arm_log_level
,
1045 "DEQUEUE_MESSAGE interrupted");
1049 spin_lock(&msg_queue_spinlock
);
1050 } while (user_service
->msg_remove
==
1051 user_service
->msg_insert
);
1057 BUG_ON((int)(user_service
->msg_insert
-
1058 user_service
->msg_remove
) < 0);
1060 header
= user_service
->msg_queue
[user_service
->msg_remove
&
1061 (MSG_QUEUE_SIZE
- 1)];
1062 user_service
->msg_remove
++;
1063 spin_unlock(&msg_queue_spinlock
);
/* Make room for the callback thread if it was blocked on a full queue. */
1065 up(&user_service
->remove_event
);
1068 else if (header
->size
<= args
.bufsize
) {
1069 /* Copy to user space if msgbuf is not NULL */
1070 if ((args
.buf
== NULL
) ||
1071 (copy_to_user((void __user
*)args
.buf
,
1073 header
->size
) == 0)) {
1075 vchiq_release_message(
1081 vchiq_log_error(vchiq_arm_log_level
,
1082 "header %pK: bufsize %x < size %x",
1083 header
, args
.bufsize
, header
->size
);
1084 WARN(1, "invalid size\n");
1087 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
/* GET_CLIENT_ID: return the creator pid of the service. */
1090 case VCHIQ_IOC_GET_CLIENT_ID
: {
1091 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
1093 ret
= vchiq_get_client_id(handle
);
/* GET_CONFIG: copy a bounded config snapshot to user space. */
1096 case VCHIQ_IOC_GET_CONFIG
: {
1097 VCHIQ_GET_CONFIG_T args
;
1098 VCHIQ_CONFIG_T config
;
1100 if (copy_from_user(&args
, (const void __user
*)arg
,
1101 sizeof(args
)) != 0) {
1105 if (args
.config_size
> sizeof(config
)) {
1109 status
= vchiq_get_config(instance
, args
.config_size
, &config
);
1110 if (status
== VCHIQ_SUCCESS
) {
1111 if (copy_to_user((void __user
*)args
.pconfig
,
1112 &config
, args
.config_size
) != 0) {
/* SET_SERVICE_OPTION: forward a per-service option to the core. */
1119 case VCHIQ_IOC_SET_SERVICE_OPTION
: {
1120 VCHIQ_SET_SERVICE_OPTION_T args
;
1123 &args
, (const void __user
*)arg
,
1124 sizeof(args
)) != 0) {
1129 service
= find_service_for_instance(instance
, args
.handle
);
1135 status
= vchiq_set_service_option(
1136 args
.handle
, args
.option
, args
.value
);
/* DUMP_PHYS_MEM: debug helper dumping physical pages behind a user
 * virtual range. */
1139 case VCHIQ_IOC_DUMP_PHYS_MEM
: {
1140 VCHIQ_DUMP_MEM_T args
;
1143 (&args
, (const void __user
*)arg
,
1144 sizeof(args
)) != 0) {
1148 dump_phys_mem(args
.virt_addr
, args
.num_bytes
);
/* LIB_VERSION: record the client library version and enable the
 * close-delivered handshake when supported. */
1151 case VCHIQ_IOC_LIB_VERSION
: {
1152 unsigned int lib_version
= (unsigned int)arg
;
1154 if (lib_version
< VCHIQ_VERSION_MIN
)
1156 else if (lib_version
>= VCHIQ_VERSION_CLOSE_DELIVERED
)
1157 instance
->use_close_delivered
= 1;
/* CLOSE_DELIVERED: client acknowledges a CLOSED completion. */
1160 case VCHIQ_IOC_CLOSE_DELIVERED
: {
1161 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
1163 service
= find_closed_service_for_instance(instance
, handle
);
1164 if (service
!= NULL
) {
1165 USER_SERVICE_T
*user_service
=
1166 (USER_SERVICE_T
*)service
->base
.userdata
;
1167 close_delivered(user_service
);
1179 unlock_service(service
);
/* Common exit: map VCHIQ status to errno and log the outcome. */
1182 if (status
== VCHIQ_ERROR
)
1184 else if (status
== VCHIQ_RETRY
)
1188 if ((status
== VCHIQ_SUCCESS
) && (ret
< 0) && (ret
!= -EINTR
) &&
1189 (ret
!= -EWOULDBLOCK
))
1190 vchiq_log_info(vchiq_arm_log_level
,
1191 " ioctl instance %lx, cmd %s -> status %d, %ld",
1192 (unsigned long)instance
,
1193 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
) ?
1194 ioctl_names
[_IOC_NR(cmd
)] :
1198 vchiq_log_trace(vchiq_arm_log_level
,
1199 " ioctl instance %lx, cmd %s -> status %d, %ld",
1200 (unsigned long)instance
,
1201 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
) ?
1202 ioctl_names
[_IOC_NR(cmd
)] :
1209 /****************************************************************************
1213 ***************************************************************************/
/* Char-device open: for the vchiq minor, allocate and initialise a
 * per-file instance (completion queue, bulk-waiter list, debugfs entry)
 * and stash it in file->private_data.
 * NOTE(review): the switch over the minor number, error returns and
 * braces are missing from this listing (numbering jumps 1219 -> 1223,
 * 1239 -> 1245).
 */
1216 vchiq_open(struct inode
*inode
, struct file
*file
)
1218 int dev
= iminor(inode
) & 0x0f;
1219 vchiq_log_info(vchiq_arm_log_level
, "vchiq_open");
1223 VCHIQ_STATE_T
*state
= vchiq_get_state();
1224 VCHIQ_INSTANCE_T instance
;
/* No state means the firmware link never came up. */
1227 vchiq_log_error(vchiq_arm_log_level
,
1228 "vchiq has no connection to VideoCore");
1232 instance
= kzalloc(sizeof(*instance
), GFP_KERNEL
);
1236 instance
->state
= state
;
/* Track the owning process (thread-group id) for bulk-waiter matching
 * and service accounting. */
1237 instance
->pid
= current
->tgid
;
1239 ret
= vchiq_debugfs_add_instance(instance
);
1245 sema_init(&instance
->insert_event
, 0);
1246 sema_init(&instance
->remove_event
, 0);
1247 mutex_init(&instance
->completion_mutex
);
1248 mutex_init(&instance
->bulk_waiter_list_mutex
);
1249 INIT_LIST_HEAD(&instance
->bulk_waiter_list
);
1251 file
->private_data
= instance
;
1255 vchiq_log_error(vchiq_arm_log_level
,
1256 "Unknown minor device: %d", dev
);
1263 /****************************************************************************
1267 ***************************************************************************/
/* Char-device release: tear down the per-file instance -- wake any
 * completion waiters, terminate every service and wait for it to reach
 * FREE, drain parked messages and completions, drop use counts, clean
 * up parked bulk waiters, and free the instance.
 * NOTE(review): this listing is missing many lines (braces, the
 * kfree/free paths, loop tails) -- numbering jumps e.g. 1315 -> 1318,
 * 1386 -> 1391. Treat as a fragment.
 */
1270 vchiq_release(struct inode
*inode
, struct file
*file
)
1272 int dev
= iminor(inode
) & 0x0f;
1276 VCHIQ_INSTANCE_T instance
= file
->private_data
;
1277 VCHIQ_STATE_T
*state
= vchiq_get_state();
1278 VCHIQ_SERVICE_T
*service
;
1281 vchiq_log_info(vchiq_arm_log_level
,
1282 "vchiq_release: instance=%lx",
1283 (unsigned long)instance
);
1290 /* Ensure videocore is awake to allow termination. */
1291 vchiq_use_internal(instance
->state
, NULL
,
1294 mutex_lock(&instance
->completion_mutex
);
1296 /* Wake the completion thread and ask it to exit */
1297 instance
->closing
= 1;
1298 up(&instance
->insert_event
);
1300 mutex_unlock(&instance
->completion_mutex
);
1302 /* Wake the slot handler if the completion queue is full. */
1303 up(&instance
->remove_event
);
1305 /* Mark all services for termination... */
1307 while ((service
= next_service_by_instance(state
, instance
,
1309 USER_SERVICE_T
*user_service
= service
->base
.userdata
;
1311 /* Wake the slot handler if the msg queue is full. */
1312 up(&user_service
->remove_event
);
1314 vchiq_terminate_service_internal(service
);
1315 unlock_service(service
);
1318 /* ...and wait for them to die */
1320 while ((service
= next_service_by_instance(state
, instance
, &i
))
1322 USER_SERVICE_T
*user_service
= service
->base
.userdata
;
1324 down(&service
->remove_event
);
1326 BUG_ON(service
->srvstate
!= VCHIQ_SRVSTATE_FREE
);
1328 spin_lock(&msg_queue_spinlock
);
/* Release any messages still parked in the per-service queue. */
1330 while (user_service
->msg_remove
!=
1331 user_service
->msg_insert
) {
1332 VCHIQ_HEADER_T
*header
= user_service
->
1333 msg_queue
[user_service
->msg_remove
&
1334 (MSG_QUEUE_SIZE
- 1)];
1335 user_service
->msg_remove
++;
1336 spin_unlock(&msg_queue_spinlock
);
1339 vchiq_release_message(
1342 spin_lock(&msg_queue_spinlock
);
1345 spin_unlock(&msg_queue_spinlock
);
1347 unlock_service(service
);
1350 /* Release any closed services */
1351 while (instance
->completion_remove
!=
1352 instance
->completion_insert
) {
1353 VCHIQ_COMPLETION_DATA_T
*completion
;
1354 VCHIQ_SERVICE_T
*service
;
1355 completion
= &instance
->completions
[
1356 instance
->completion_remove
&
1357 (MAX_COMPLETIONS
- 1)];
1358 service
= completion
->service_userdata
;
1359 if (completion
->reason
== VCHIQ_SERVICE_CLOSED
)
1361 USER_SERVICE_T
*user_service
=
1362 service
->base
.userdata
;
1364 /* Wake any blocked user-thread */
1365 if (instance
->use_close_delivered
)
1366 up(&user_service
->close_event
);
1367 unlock_service(service
);
1369 instance
->completion_remove
++;
1372 /* Release the PEER service count. */
1373 vchiq_release_internal(instance
->state
, NULL
);
/* Free any bulk waiters parked by interrupted blocking transfers. */
1376 struct list_head
*pos
, *next
;
1377 list_for_each_safe(pos
, next
,
1378 &instance
->bulk_waiter_list
) {
1379 struct bulk_waiter_node
*waiter
;
1380 waiter
= list_entry(pos
,
1381 struct bulk_waiter_node
,
1384 vchiq_log_info(vchiq_arm_log_level
,
1385 "bulk_waiter - cleaned up %pK for pid %d",
1386 waiter
, waiter
->pid
);
1391 vchiq_debugfs_remove_instance(instance
);
1394 file
->private_data
= NULL
;
1398 vchiq_log_error(vchiq_arm_log_level
,
1399 "Unknown minor device: %d", dev
);
1407 /****************************************************************************
1411 ***************************************************************************/
1414 vchiq_dump(void *dump_context
, const char *str
, int len
)
1416 DUMP_CONTEXT_T
*context
= (DUMP_CONTEXT_T
*)dump_context
;
1418 if (context
->actual
< context
->space
) {
1420 if (context
->offset
> 0) {
1421 int skip_bytes
= min(len
, (int)context
->offset
);
1424 context
->offset
-= skip_bytes
;
1425 if (context
->offset
> 0)
1428 copy_bytes
= min(len
, (int)(context
->space
- context
->actual
));
1429 if (copy_bytes
== 0)
1431 if (copy_to_user(context
->buf
+ context
->actual
, str
,
1433 context
->actual
= -EFAULT
;
1434 context
->actual
+= copy_bytes
;
1437 /* If tne terminating NUL is included in the length, then it
1438 ** marks the end of a line and should be replaced with a
1439 ** carriage return. */
1440 if ((len
== 0) && (str
[copy_bytes
- 1] == '\0')) {
1442 if (copy_to_user(context
->buf
+ context
->actual
- 1,
1444 context
->actual
= -EFAULT
;
1449 /****************************************************************************
1451 * vchiq_dump_platform_instance_state
1453 ***************************************************************************/
1456 vchiq_dump_platform_instances(void *dump_context
)
1458 VCHIQ_STATE_T
*state
= vchiq_get_state();
1463 /* There is no list of instances, so instead scan all services,
1464 marking those that have been dumped. */
1466 for (i
= 0; i
< state
->unused_service
; i
++) {
1467 VCHIQ_SERVICE_T
*service
= state
->services
[i
];
1468 VCHIQ_INSTANCE_T instance
;
1470 if (service
&& (service
->base
.callback
== service_callback
)) {
1471 instance
= service
->instance
;
1477 for (i
= 0; i
< state
->unused_service
; i
++) {
1478 VCHIQ_SERVICE_T
*service
= state
->services
[i
];
1479 VCHIQ_INSTANCE_T instance
;
1481 if (service
&& (service
->base
.callback
== service_callback
)) {
1482 instance
= service
->instance
;
1483 if (instance
&& !instance
->mark
) {
1484 len
= snprintf(buf
, sizeof(buf
),
1485 "Instance %pK: pid %d,%s completions %d/%d",
1486 instance
, instance
->pid
,
1487 instance
->connected
? " connected, " :
1489 instance
->completion_insert
-
1490 instance
->completion_remove
,
1493 vchiq_dump(dump_context
, buf
, len
+ 1);
1501 /****************************************************************************
1503 * vchiq_dump_platform_service_state
1505 ***************************************************************************/
1508 vchiq_dump_platform_service_state(void *dump_context
, VCHIQ_SERVICE_T
*service
)
1510 USER_SERVICE_T
*user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
1514 len
= snprintf(buf
, sizeof(buf
), " instance %pK", service
->instance
);
1516 if ((service
->base
.callback
== service_callback
) &&
1517 user_service
->is_vchi
) {
1518 len
+= snprintf(buf
+ len
, sizeof(buf
) - len
,
1520 user_service
->msg_insert
- user_service
->msg_remove
,
1523 if (user_service
->dequeue_pending
)
1524 len
+= snprintf(buf
+ len
, sizeof(buf
) - len
,
1525 " (dequeue pending)");
1528 vchiq_dump(dump_context
, buf
, len
+ 1);
1531 /****************************************************************************
1535 ***************************************************************************/
1538 dump_phys_mem(void *virt_addr
, uint32_t num_bytes
)
1541 uint8_t *end_virt_addr
= virt_addr
+ num_bytes
;
1548 struct page
**pages
;
1549 uint8_t *kmapped_virt_ptr
;
1551 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1553 virt_addr
= (void *)((unsigned long)virt_addr
& ~0x0fuL
);
1554 end_virt_addr
= (void *)(((unsigned long)end_virt_addr
+ 15uL) &
1557 offset
= (int)(long)virt_addr
& (PAGE_SIZE
- 1);
1558 end_offset
= (int)(long)end_virt_addr
& (PAGE_SIZE
- 1);
1560 num_pages
= (offset
+ num_bytes
+ PAGE_SIZE
- 1) / PAGE_SIZE
;
1562 pages
= kmalloc(sizeof(struct page
*) * num_pages
, GFP_KERNEL
);
1563 if (pages
== NULL
) {
1564 vchiq_log_error(vchiq_arm_log_level
,
1565 "Unable to allocation memory for %d pages\n",
1570 down_read(¤t
->mm
->mmap_sem
);
1571 rc
= get_user_pages(
1572 (unsigned long)virt_addr
, /* start */
1573 num_pages
, /* len */
1575 pages
, /* pages (array of page pointers) */
1577 up_read(¤t
->mm
->mmap_sem
);
1583 vchiq_log_error(vchiq_arm_log_level
,
1584 "Failed to get user pages: %d\n", rc
);
1588 while (offset
< end_offset
) {
1590 int page_offset
= offset
% PAGE_SIZE
;
1591 page_idx
= offset
/ PAGE_SIZE
;
1593 if (page_idx
!= prev_idx
) {
1597 page
= pages
[page_idx
];
1598 kmapped_virt_ptr
= kmap(page
);
1600 prev_idx
= page_idx
;
1603 if (vchiq_arm_log_level
>= VCHIQ_LOG_TRACE
)
1604 vchiq_log_dump_mem("ph",
1605 (uint32_t)(unsigned long)&kmapped_virt_ptr
[
1607 &kmapped_virt_ptr
[page_offset
], 16);
1616 for (page_idx
= 0; page_idx
< num_pages
; page_idx
++)
1617 put_page(pages
[page_idx
]);
1622 /****************************************************************************
1626 ***************************************************************************/
1629 vchiq_read(struct file
*file
, char __user
*buf
,
1630 size_t count
, loff_t
*ppos
)
1632 DUMP_CONTEXT_T context
;
1635 context
.space
= count
;
1636 context
.offset
= *ppos
;
1638 vchiq_dump_state(&context
, &g_state
);
1640 *ppos
+= context
.actual
;
1642 return context
.actual
;
1646 vchiq_get_state(void)
1649 if (g_state
.remote
== NULL
)
1650 printk(KERN_ERR
"%s: g_state.remote == NULL\n", __func__
);
1651 else if (g_state
.remote
->initialised
!= 1)
1652 printk(KERN_NOTICE
"%s: g_state.remote->initialised != 1 (%d)\n",
1653 __func__
, g_state
.remote
->initialised
);
1655 return ((g_state
.remote
!= NULL
) &&
1656 (g_state
.remote
->initialised
== 1)) ? &g_state
: NULL
;
1659 static const struct file_operations
1661 .owner
= THIS_MODULE
,
1662 .unlocked_ioctl
= vchiq_ioctl
,
1664 .release
= vchiq_release
,
1669 * Autosuspend related functionality
1673 vchiq_videocore_wanted(VCHIQ_STATE_T
*state
)
1675 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
1677 /* autosuspend not supported - always return wanted */
1679 else if (arm_state
->blocked_count
)
1681 else if (!arm_state
->videocore_use_count
)
1682 /* usage count zero - check for override unless we're forcing */
1683 if (arm_state
->resume_blocked
)
1686 return vchiq_platform_videocore_wanted(state
);
1688 /* non-zero usage count - videocore still required */
1692 static VCHIQ_STATUS_T
1693 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason
,
1694 VCHIQ_HEADER_T
*header
,
1695 VCHIQ_SERVICE_HANDLE_T service_user
,
1698 vchiq_log_error(vchiq_susp_log_level
,
1699 "%s callback reason %d", __func__
, reason
);
1704 vchiq_keepalive_thread_func(void *v
)
1706 VCHIQ_STATE_T
*state
= (VCHIQ_STATE_T
*) v
;
1707 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
1709 VCHIQ_STATUS_T status
;
1710 VCHIQ_INSTANCE_T instance
;
1711 VCHIQ_SERVICE_HANDLE_T ka_handle
;
1713 VCHIQ_SERVICE_PARAMS_T params
= {
1714 .fourcc
= VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1715 .callback
= vchiq_keepalive_vchiq_callback
,
1716 .version
= KEEPALIVE_VER
,
1717 .version_min
= KEEPALIVE_VER_MIN
1720 status
= vchiq_initialise(&instance
);
1721 if (status
!= VCHIQ_SUCCESS
) {
1722 vchiq_log_error(vchiq_susp_log_level
,
1723 "%s vchiq_initialise failed %d", __func__
, status
);
1727 status
= vchiq_connect(instance
);
1728 if (status
!= VCHIQ_SUCCESS
) {
1729 vchiq_log_error(vchiq_susp_log_level
,
1730 "%s vchiq_connect failed %d", __func__
, status
);
1734 status
= vchiq_add_service(instance
, ¶ms
, &ka_handle
);
1735 if (status
!= VCHIQ_SUCCESS
) {
1736 vchiq_log_error(vchiq_susp_log_level
,
1737 "%s vchiq_open_service failed %d", __func__
, status
);
1742 long rc
= 0, uc
= 0;
1743 if (wait_for_completion_interruptible(&arm_state
->ka_evt
)
1745 vchiq_log_error(vchiq_susp_log_level
,
1746 "%s interrupted", __func__
);
1747 flush_signals(current
);
1751 /* read and clear counters. Do release_count then use_count to
1752 * prevent getting more releases than uses */
1753 rc
= atomic_xchg(&arm_state
->ka_release_count
, 0);
1754 uc
= atomic_xchg(&arm_state
->ka_use_count
, 0);
1756 /* Call use/release service the requisite number of times.
1757 * Process use before release so use counts don't go negative */
1759 atomic_inc(&arm_state
->ka_use_ack_count
);
1760 status
= vchiq_use_service(ka_handle
);
1761 if (status
!= VCHIQ_SUCCESS
) {
1762 vchiq_log_error(vchiq_susp_log_level
,
1763 "%s vchiq_use_service error %d",
1768 status
= vchiq_release_service(ka_handle
);
1769 if (status
!= VCHIQ_SUCCESS
) {
1770 vchiq_log_error(vchiq_susp_log_level
,
1771 "%s vchiq_release_service error %d",
1778 vchiq_shutdown(instance
);
1786 vchiq_arm_init_state(VCHIQ_STATE_T
*state
, VCHIQ_ARM_STATE_T
*arm_state
)
1789 rwlock_init(&arm_state
->susp_res_lock
);
1791 init_completion(&arm_state
->ka_evt
);
1792 atomic_set(&arm_state
->ka_use_count
, 0);
1793 atomic_set(&arm_state
->ka_use_ack_count
, 0);
1794 atomic_set(&arm_state
->ka_release_count
, 0);
1796 init_completion(&arm_state
->vc_suspend_complete
);
1798 init_completion(&arm_state
->vc_resume_complete
);
1799 /* Initialise to 'done' state. We only want to block on resume
1800 * completion while videocore is suspended. */
1801 set_resume_state(arm_state
, VC_RESUME_RESUMED
);
1803 init_completion(&arm_state
->resume_blocker
);
1804 /* Initialise to 'done' state. We only want to block on this
1805 * completion while resume is blocked */
1806 complete_all(&arm_state
->resume_blocker
);
1808 init_completion(&arm_state
->blocked_blocker
);
1809 /* Initialise to 'done' state. We only want to block on this
1810 * completion while things are waiting on the resume blocker */
1811 complete_all(&arm_state
->blocked_blocker
);
1813 arm_state
->suspend_timer_timeout
= SUSPEND_TIMER_TIMEOUT_MS
;
1814 arm_state
->suspend_timer_running
= 0;
1815 setup_timer(&arm_state
->suspend_timer
, suspend_timer_callback
,
1816 (unsigned long)(state
));
1818 arm_state
->first_connect
= 0;
1821 return VCHIQ_SUCCESS
;
1825 ** Functions to modify the state variables;
1826 ** set_suspend_state
1829 ** There are more state variables than we might like, so ensure they remain in
1830 ** step. Suspend and resume state are maintained separately, since most of
1831 ** these state machines can operate independently. However, there are a few
1832 ** states where state transitions in one state machine cause a reset to the
1833 ** other state machine. In addition, there are some completion events which
1834 ** need to occur on state machine reset and end-state(s), so these are also
1835 ** dealt with in these functions.
1837 ** In all states we set the state variable according to the input, but in some
1838 ** cases we perform additional steps outlined below;
1840 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1841 ** The suspend completion is completed after any suspend
1842 ** attempt. When we reset the state machine we also reset
1843 ** the completion. This reset occurs when videocore is
1844 ** resumed, and also if we initiate suspend after a suspend
1847 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1848 ** suspend - ie from this point on we must try to suspend
1849 ** before resuming can occur. We therefore also reset the
1850 ** resume state machine to VC_RESUME_IDLE in this state.
1852 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1853 ** complete_all on the suspend completion to notify
1854 ** anything waiting for suspend to happen.
1856 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1857 ** initiate resume, so no need to alter resume state.
1858 ** We call complete_all on the suspend completion to notify
1859 ** of suspend rejection.
1861 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1862 ** suspend completion and reset the resume state machine.
1864 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
1865 ** resume completion is in it's 'done' state whenever
1866 ** videcore is running. Therefore, the VC_RESUME_IDLE
1867 ** state implies that videocore is suspended.
1868 ** Hence, any thread which needs to wait until videocore is
1869 ** running can wait on this completion - it will only block
1870 ** if videocore is suspended.
1872 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
1873 ** Call complete_all on the resume completion to unblock
1874 ** any threads waiting for resume. Also reset the suspend
1875 ** state machine to it's idle state.
1877 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1881 set_suspend_state(VCHIQ_ARM_STATE_T
*arm_state
,
1882 enum vc_suspend_status new_state
)
1884 /* set the state in all cases */
1885 arm_state
->vc_suspend_state
= new_state
;
1887 /* state specific additional actions */
1888 switch (new_state
) {
1889 case VC_SUSPEND_FORCE_CANCELED
:
1890 complete_all(&arm_state
->vc_suspend_complete
);
1892 case VC_SUSPEND_REJECTED
:
1893 complete_all(&arm_state
->vc_suspend_complete
);
1895 case VC_SUSPEND_FAILED
:
1896 complete_all(&arm_state
->vc_suspend_complete
);
1897 arm_state
->vc_resume_state
= VC_RESUME_RESUMED
;
1898 complete_all(&arm_state
->vc_resume_complete
);
1900 case VC_SUSPEND_IDLE
:
1901 reinit_completion(&arm_state
->vc_suspend_complete
);
1903 case VC_SUSPEND_REQUESTED
:
1905 case VC_SUSPEND_IN_PROGRESS
:
1906 set_resume_state(arm_state
, VC_RESUME_IDLE
);
1908 case VC_SUSPEND_SUSPENDED
:
1909 complete_all(&arm_state
->vc_suspend_complete
);
1918 set_resume_state(VCHIQ_ARM_STATE_T
*arm_state
,
1919 enum vc_resume_status new_state
)
1921 /* set the state in all cases */
1922 arm_state
->vc_resume_state
= new_state
;
1924 /* state specific additional actions */
1925 switch (new_state
) {
1926 case VC_RESUME_FAILED
:
1928 case VC_RESUME_IDLE
:
1929 reinit_completion(&arm_state
->vc_resume_complete
);
1931 case VC_RESUME_REQUESTED
:
1933 case VC_RESUME_IN_PROGRESS
:
1935 case VC_RESUME_RESUMED
:
1936 complete_all(&arm_state
->vc_resume_complete
);
1937 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
1946 /* should be called with the write lock held */
1948 start_suspend_timer(VCHIQ_ARM_STATE_T
*arm_state
)
1950 del_timer(&arm_state
->suspend_timer
);
1951 arm_state
->suspend_timer
.expires
= jiffies
+
1952 msecs_to_jiffies(arm_state
->
1953 suspend_timer_timeout
);
1954 add_timer(&arm_state
->suspend_timer
);
1955 arm_state
->suspend_timer_running
= 1;
1958 /* should be called with the write lock held */
1960 stop_suspend_timer(VCHIQ_ARM_STATE_T
*arm_state
)
1962 if (arm_state
->suspend_timer_running
) {
1963 del_timer(&arm_state
->suspend_timer
);
1964 arm_state
->suspend_timer_running
= 0;
1969 need_resume(VCHIQ_STATE_T
*state
)
1971 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
1972 return (arm_state
->vc_suspend_state
> VC_SUSPEND_IDLE
) &&
1973 (arm_state
->vc_resume_state
< VC_RESUME_REQUESTED
) &&
1974 vchiq_videocore_wanted(state
);
1978 block_resume(VCHIQ_ARM_STATE_T
*arm_state
)
1980 int status
= VCHIQ_SUCCESS
;
1981 const unsigned long timeout_val
=
1982 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS
);
1983 int resume_count
= 0;
1985 /* Allow any threads which were blocked by the last force suspend to
1986 * complete if they haven't already. Only give this one shot; if
1987 * blocked_count is incremented after blocked_blocker is completed
1988 * (which only happens when blocked_count hits 0) then those threads
1989 * will have to wait until next time around */
1990 if (arm_state
->blocked_count
) {
1991 reinit_completion(&arm_state
->blocked_blocker
);
1992 write_unlock_bh(&arm_state
->susp_res_lock
);
1993 vchiq_log_info(vchiq_susp_log_level
, "%s wait for previously "
1994 "blocked clients", __func__
);
1995 if (wait_for_completion_interruptible_timeout(
1996 &arm_state
->blocked_blocker
, timeout_val
)
1998 vchiq_log_error(vchiq_susp_log_level
, "%s wait for "
1999 "previously blocked clients failed" , __func__
);
2000 status
= VCHIQ_ERROR
;
2001 write_lock_bh(&arm_state
->susp_res_lock
);
2004 vchiq_log_info(vchiq_susp_log_level
, "%s previously blocked "
2005 "clients resumed", __func__
);
2006 write_lock_bh(&arm_state
->susp_res_lock
);
2009 /* We need to wait for resume to complete if it's in process */
2010 while (arm_state
->vc_resume_state
!= VC_RESUME_RESUMED
&&
2011 arm_state
->vc_resume_state
> VC_RESUME_IDLE
) {
2012 if (resume_count
> 1) {
2013 status
= VCHIQ_ERROR
;
2014 vchiq_log_error(vchiq_susp_log_level
, "%s waited too "
2015 "many times for resume" , __func__
);
2018 write_unlock_bh(&arm_state
->susp_res_lock
);
2019 vchiq_log_info(vchiq_susp_log_level
, "%s wait for resume",
2021 if (wait_for_completion_interruptible_timeout(
2022 &arm_state
->vc_resume_complete
, timeout_val
)
2024 vchiq_log_error(vchiq_susp_log_level
, "%s wait for "
2025 "resume failed (%s)", __func__
,
2026 resume_state_names
[arm_state
->vc_resume_state
+
2027 VC_RESUME_NUM_OFFSET
]);
2028 status
= VCHIQ_ERROR
;
2029 write_lock_bh(&arm_state
->susp_res_lock
);
2032 vchiq_log_info(vchiq_susp_log_level
, "%s resumed", __func__
);
2033 write_lock_bh(&arm_state
->susp_res_lock
);
2036 reinit_completion(&arm_state
->resume_blocker
);
2037 arm_state
->resume_blocked
= 1;
2044 unblock_resume(VCHIQ_ARM_STATE_T
*arm_state
)
2046 complete_all(&arm_state
->resume_blocker
);
2047 arm_state
->resume_blocked
= 0;
2050 /* Initiate suspend via slot handler. Should be called with the write lock
2053 vchiq_arm_vcsuspend(VCHIQ_STATE_T
*state
)
2055 VCHIQ_STATUS_T status
= VCHIQ_ERROR
;
2056 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2061 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2062 status
= VCHIQ_SUCCESS
;
2065 switch (arm_state
->vc_suspend_state
) {
2066 case VC_SUSPEND_REQUESTED
:
2067 vchiq_log_info(vchiq_susp_log_level
, "%s: suspend already "
2068 "requested", __func__
);
2070 case VC_SUSPEND_IN_PROGRESS
:
2071 vchiq_log_info(vchiq_susp_log_level
, "%s: suspend already in "
2072 "progress", __func__
);
2076 /* We don't expect to be in other states, so log but continue
2078 vchiq_log_error(vchiq_susp_log_level
,
2079 "%s unexpected suspend state %s", __func__
,
2080 suspend_state_names
[arm_state
->vc_suspend_state
+
2081 VC_SUSPEND_NUM_OFFSET
]);
2083 case VC_SUSPEND_REJECTED
:
2084 case VC_SUSPEND_FAILED
:
2085 /* Ensure any idle state actions have been run */
2086 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2088 case VC_SUSPEND_IDLE
:
2089 vchiq_log_info(vchiq_susp_log_level
,
2090 "%s: suspending", __func__
);
2091 set_suspend_state(arm_state
, VC_SUSPEND_REQUESTED
);
2092 /* kick the slot handler thread to initiate suspend */
2093 request_poll(state
, NULL
, 0);
2098 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, status
);
2103 vchiq_platform_check_suspend(VCHIQ_STATE_T
*state
)
2105 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2111 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2113 write_lock_bh(&arm_state
->susp_res_lock
);
2114 if (arm_state
->vc_suspend_state
== VC_SUSPEND_REQUESTED
&&
2115 arm_state
->vc_resume_state
== VC_RESUME_RESUMED
) {
2116 set_suspend_state(arm_state
, VC_SUSPEND_IN_PROGRESS
);
2119 write_unlock_bh(&arm_state
->susp_res_lock
);
2122 vchiq_platform_suspend(state
);
2125 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2131 output_timeout_error(VCHIQ_STATE_T
*state
)
2133 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2135 int vc_use_count
= arm_state
->videocore_use_count
;
2136 int active_services
= state
->unused_service
;
2139 if (!arm_state
->videocore_use_count
) {
2140 snprintf(err
, sizeof(err
), " Videocore usecount is 0");
2143 for (i
= 0; i
< active_services
; i
++) {
2144 VCHIQ_SERVICE_T
*service_ptr
= state
->services
[i
];
2145 if (service_ptr
&& service_ptr
->service_use_count
&&
2146 (service_ptr
->srvstate
!= VCHIQ_SRVSTATE_FREE
)) {
2147 snprintf(err
, sizeof(err
), " %c%c%c%c(%d) service has "
2148 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2149 service_ptr
->base
.fourcc
),
2150 service_ptr
->client_id
,
2151 service_ptr
->service_use_count
,
2152 service_ptr
->service_use_count
==
2153 vc_use_count
? "" : " (+ more)");
2159 vchiq_log_error(vchiq_susp_log_level
,
2160 "timed out waiting for vc suspend (%d).%s",
2161 arm_state
->autosuspend_override
, err
);
2165 /* Try to get videocore into suspended state, regardless of autosuspend state.
2166 ** We don't actually force suspend, since videocore may get into a bad state
2167 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2168 ** determine a good point to suspend. If this doesn't happen within 100ms we
2171 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2172 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2175 vchiq_arm_force_suspend(VCHIQ_STATE_T
*state
)
2177 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2178 VCHIQ_STATUS_T status
= VCHIQ_ERROR
;
2185 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2187 write_lock_bh(&arm_state
->susp_res_lock
);
2189 status
= block_resume(arm_state
);
2190 if (status
!= VCHIQ_SUCCESS
)
2192 if (arm_state
->vc_suspend_state
== VC_SUSPEND_SUSPENDED
) {
2193 /* Already suspended - just block resume and exit */
2194 vchiq_log_info(vchiq_susp_log_level
, "%s already suspended",
2196 status
= VCHIQ_SUCCESS
;
2198 } else if (arm_state
->vc_suspend_state
<= VC_SUSPEND_IDLE
) {
2199 /* initiate suspend immediately in the case that we're waiting
2200 * for the timeout */
2201 stop_suspend_timer(arm_state
);
2202 if (!vchiq_videocore_wanted(state
)) {
2203 vchiq_log_info(vchiq_susp_log_level
, "%s videocore "
2204 "idle, initiating suspend", __func__
);
2205 status
= vchiq_arm_vcsuspend(state
);
2206 } else if (arm_state
->autosuspend_override
<
2207 FORCE_SUSPEND_FAIL_MAX
) {
2208 vchiq_log_info(vchiq_susp_log_level
, "%s letting "
2209 "videocore go idle", __func__
);
2210 status
= VCHIQ_SUCCESS
;
2212 vchiq_log_warning(vchiq_susp_log_level
, "%s failed too "
2213 "many times - attempting suspend", __func__
);
2214 status
= vchiq_arm_vcsuspend(state
);
2217 vchiq_log_info(vchiq_susp_log_level
, "%s videocore suspend "
2218 "in progress - wait for completion", __func__
);
2219 status
= VCHIQ_SUCCESS
;
2222 /* Wait for suspend to happen due to system idle (not forced..) */
2223 if (status
!= VCHIQ_SUCCESS
)
2224 goto unblock_resume
;
2227 write_unlock_bh(&arm_state
->susp_res_lock
);
2229 rc
= wait_for_completion_interruptible_timeout(
2230 &arm_state
->vc_suspend_complete
,
2231 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS
));
2233 write_lock_bh(&arm_state
->susp_res_lock
);
2235 vchiq_log_warning(vchiq_susp_log_level
, "%s "
2236 "interrupted waiting for suspend", __func__
);
2237 status
= VCHIQ_ERROR
;
2238 goto unblock_resume
;
2239 } else if (rc
== 0) {
2240 if (arm_state
->vc_suspend_state
> VC_SUSPEND_IDLE
) {
2241 /* Repeat timeout once if in progress */
2247 arm_state
->autosuspend_override
++;
2248 output_timeout_error(state
);
2250 status
= VCHIQ_RETRY
;
2251 goto unblock_resume
;
2253 } while (0 < (repeat
--));
2255 /* Check and report state in case we need to abort ARM suspend */
2256 if (arm_state
->vc_suspend_state
!= VC_SUSPEND_SUSPENDED
) {
2257 status
= VCHIQ_RETRY
;
2258 vchiq_log_error(vchiq_susp_log_level
,
2259 "%s videocore suspend failed (state %s)", __func__
,
2260 suspend_state_names
[arm_state
->vc_suspend_state
+
2261 VC_SUSPEND_NUM_OFFSET
]);
2262 /* Reset the state only if it's still in an error state.
2263 * Something could have already initiated another suspend. */
2264 if (arm_state
->vc_suspend_state
< VC_SUSPEND_IDLE
)
2265 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2267 goto unblock_resume
;
2270 /* successfully suspended - unlock and exit */
2274 /* all error states need to unblock resume before exit */
2275 unblock_resume(arm_state
);
2278 write_unlock_bh(&arm_state
->susp_res_lock
);
2281 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, status
);
2286 vchiq_check_suspend(VCHIQ_STATE_T
*state
)
2288 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2293 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2295 write_lock_bh(&arm_state
->susp_res_lock
);
2296 if (arm_state
->vc_suspend_state
!= VC_SUSPEND_SUSPENDED
&&
2297 arm_state
->first_connect
&&
2298 !vchiq_videocore_wanted(state
)) {
2299 vchiq_arm_vcsuspend(state
);
2301 write_unlock_bh(&arm_state
->susp_res_lock
);
2304 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2310 vchiq_arm_allow_resume(VCHIQ_STATE_T
*state
)
2312 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2319 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2321 write_lock_bh(&arm_state
->susp_res_lock
);
2322 unblock_resume(arm_state
);
2323 resume
= vchiq_check_resume(state
);
2324 write_unlock_bh(&arm_state
->susp_res_lock
);
2327 if (wait_for_completion_interruptible(
2328 &arm_state
->vc_resume_complete
) < 0) {
2329 vchiq_log_error(vchiq_susp_log_level
,
2330 "%s interrupted", __func__
);
2331 /* failed, cannot accurately derive suspend
2332 * state, so exit early. */
2337 read_lock_bh(&arm_state
->susp_res_lock
);
2338 if (arm_state
->vc_suspend_state
== VC_SUSPEND_SUSPENDED
) {
2339 vchiq_log_info(vchiq_susp_log_level
,
2340 "%s: Videocore remains suspended", __func__
);
2342 vchiq_log_info(vchiq_susp_log_level
,
2343 "%s: Videocore resumed", __func__
);
2346 read_unlock_bh(&arm_state
->susp_res_lock
);
2348 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
2352 /* This function should be called with the write lock held */
2354 vchiq_check_resume(VCHIQ_STATE_T
*state
)
2356 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2362 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2364 if (need_resume(state
)) {
2365 set_resume_state(arm_state
, VC_RESUME_REQUESTED
);
2366 request_poll(state
, NULL
, 0);
2371 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2376 vchiq_platform_check_resume(VCHIQ_STATE_T
*state
)
2378 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2384 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2386 write_lock_bh(&arm_state
->susp_res_lock
);
2387 if (arm_state
->wake_address
== 0) {
2388 vchiq_log_info(vchiq_susp_log_level
,
2389 "%s: already awake", __func__
);
2392 if (arm_state
->vc_resume_state
== VC_RESUME_IN_PROGRESS
) {
2393 vchiq_log_info(vchiq_susp_log_level
,
2394 "%s: already resuming", __func__
);
2398 if (arm_state
->vc_resume_state
== VC_RESUME_REQUESTED
) {
2399 set_resume_state(arm_state
, VC_RESUME_IN_PROGRESS
);
2402 vchiq_log_trace(vchiq_susp_log_level
,
2403 "%s: not resuming (resume state %s)", __func__
,
2404 resume_state_names
[arm_state
->vc_resume_state
+
2405 VC_RESUME_NUM_OFFSET
]);
2408 write_unlock_bh(&arm_state
->susp_res_lock
);
2411 vchiq_platform_resume(state
);
2414 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2422 vchiq_use_internal(VCHIQ_STATE_T
*state
, VCHIQ_SERVICE_T
*service
,
2423 enum USE_TYPE_E use_type
)
2425 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2426 VCHIQ_STATUS_T ret
= VCHIQ_SUCCESS
;
2429 int local_uc
, local_entity_uc
;
2434 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2436 if (use_type
== USE_TYPE_VCHIQ
) {
2437 sprintf(entity
, "VCHIQ: ");
2438 entity_uc
= &arm_state
->peer_use_count
;
2439 } else if (service
) {
2440 sprintf(entity
, "%c%c%c%c:%03d",
2441 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
2442 service
->client_id
);
2443 entity_uc
= &service
->service_use_count
;
2445 vchiq_log_error(vchiq_susp_log_level
, "%s null service "
2451 write_lock_bh(&arm_state
->susp_res_lock
);
2452 while (arm_state
->resume_blocked
) {
2453 /* If we call 'use' while force suspend is waiting for suspend,
2454 * then we're about to block the thread which the force is
2455 * waiting to complete, so we're bound to just time out. In this
2456 * case, set the suspend state such that the wait will be
2457 * canceled, so we can complete as quickly as possible. */
2458 if (arm_state
->resume_blocked
&& arm_state
->vc_suspend_state
==
2460 set_suspend_state(arm_state
, VC_SUSPEND_FORCE_CANCELED
);
2463 /* If suspend is already in progress then we need to block */
2464 if (!try_wait_for_completion(&arm_state
->resume_blocker
)) {
2465 /* Indicate that there are threads waiting on the resume
2466 * blocker. These need to be allowed to complete before
2467 * a _second_ call to force suspend can complete,
2468 * otherwise low priority threads might never actually
2470 arm_state
->blocked_count
++;
2471 write_unlock_bh(&arm_state
->susp_res_lock
);
2472 vchiq_log_info(vchiq_susp_log_level
, "%s %s resume "
2473 "blocked - waiting...", __func__
, entity
);
2474 if (wait_for_completion_killable(
2475 &arm_state
->resume_blocker
) != 0) {
2476 vchiq_log_error(vchiq_susp_log_level
, "%s %s "
2477 "wait for resume blocker interrupted",
2480 write_lock_bh(&arm_state
->susp_res_lock
);
2481 arm_state
->blocked_count
--;
2482 write_unlock_bh(&arm_state
->susp_res_lock
);
2485 vchiq_log_info(vchiq_susp_log_level
, "%s %s resume "
2486 "unblocked", __func__
, entity
);
2487 write_lock_bh(&arm_state
->susp_res_lock
);
2488 if (--arm_state
->blocked_count
== 0)
2489 complete_all(&arm_state
->blocked_blocker
);
2493 stop_suspend_timer(arm_state
);
2495 local_uc
= ++arm_state
->videocore_use_count
;
2496 local_entity_uc
= ++(*entity_uc
);
2498 /* If there's a pending request which hasn't yet been serviced then
2499 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2500 * vc_resume_complete will block until we either resume or fail to
2502 if (arm_state
->vc_suspend_state
<= VC_SUSPEND_REQUESTED
)
2503 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2505 if ((use_type
!= USE_TYPE_SERVICE_NO_RESUME
) && need_resume(state
)) {
2506 set_resume_state(arm_state
, VC_RESUME_REQUESTED
);
2507 vchiq_log_info(vchiq_susp_log_level
,
2508 "%s %s count %d, state count %d",
2509 __func__
, entity
, local_entity_uc
, local_uc
);
2510 request_poll(state
, NULL
, 0);
2512 vchiq_log_trace(vchiq_susp_log_level
,
2513 "%s %s count %d, state count %d",
2514 __func__
, entity
, *entity_uc
, local_uc
);
2517 write_unlock_bh(&arm_state
->susp_res_lock
);
2519 /* Completion is in a done state when we're not suspended, so this won't
2520 * block for the non-suspended case. */
2521 if (!try_wait_for_completion(&arm_state
->vc_resume_complete
)) {
2522 vchiq_log_info(vchiq_susp_log_level
, "%s %s wait for resume",
2524 if (wait_for_completion_killable(
2525 &arm_state
->vc_resume_complete
) != 0) {
2526 vchiq_log_error(vchiq_susp_log_level
, "%s %s wait for "
2527 "resume interrupted", __func__
, entity
);
2531 vchiq_log_info(vchiq_susp_log_level
, "%s %s resumed", __func__
,
2535 if (ret
== VCHIQ_SUCCESS
) {
2536 VCHIQ_STATUS_T status
= VCHIQ_SUCCESS
;
2537 long ack_cnt
= atomic_xchg(&arm_state
->ka_use_ack_count
, 0);
2538 while (ack_cnt
&& (status
== VCHIQ_SUCCESS
)) {
2539 /* Send the use notify to videocore */
2540 status
= vchiq_send_remote_use_active(state
);
2541 if (status
== VCHIQ_SUCCESS
)
2545 &arm_state
->ka_use_ack_count
);
2550 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
2555 vchiq_release_internal(VCHIQ_STATE_T
*state
, VCHIQ_SERVICE_T
*service
)
2557 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2558 VCHIQ_STATUS_T ret
= VCHIQ_SUCCESS
;
2561 int local_uc
, local_entity_uc
;
2566 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2569 sprintf(entity
, "%c%c%c%c:%03d",
2570 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
2571 service
->client_id
);
2572 entity_uc
= &service
->service_use_count
;
2574 sprintf(entity
, "PEER: ");
2575 entity_uc
= &arm_state
->peer_use_count
;
2578 write_lock_bh(&arm_state
->susp_res_lock
);
2579 if (!arm_state
->videocore_use_count
|| !(*entity_uc
)) {
2580 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2581 WARN_ON(!arm_state
->videocore_use_count
);
2582 WARN_ON(!(*entity_uc
));
2586 local_uc
= --arm_state
->videocore_use_count
;
2587 local_entity_uc
= --(*entity_uc
);
2589 if (!vchiq_videocore_wanted(state
)) {
2590 if (vchiq_platform_use_suspend_timer() &&
2591 !arm_state
->resume_blocked
) {
2592 /* Only use the timer if we're not trying to force
2593 * suspend (=> resume_blocked) */
2594 start_suspend_timer(arm_state
);
2596 vchiq_log_info(vchiq_susp_log_level
,
2597 "%s %s count %d, state count %d - suspending",
2598 __func__
, entity
, *entity_uc
,
2599 arm_state
->videocore_use_count
);
2600 vchiq_arm_vcsuspend(state
);
2603 vchiq_log_trace(vchiq_susp_log_level
,
2604 "%s %s count %d, state count %d",
2605 __func__
, entity
, *entity_uc
,
2606 arm_state
->videocore_use_count
);
2609 write_unlock_bh(&arm_state
->susp_res_lock
);
2612 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
2617 vchiq_on_remote_use(VCHIQ_STATE_T
*state
)
2619 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2620 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2621 atomic_inc(&arm_state
->ka_use_count
);
2622 complete(&arm_state
->ka_evt
);
2626 vchiq_on_remote_release(VCHIQ_STATE_T
*state
)
2628 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2629 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2630 atomic_inc(&arm_state
->ka_release_count
);
2631 complete(&arm_state
->ka_evt
);
2635 vchiq_use_service_internal(VCHIQ_SERVICE_T
*service
)
2637 return vchiq_use_internal(service
->state
, service
, USE_TYPE_SERVICE
);
2641 vchiq_release_service_internal(VCHIQ_SERVICE_T
*service
)
2643 return vchiq_release_internal(service
->state
, service
);
2646 VCHIQ_DEBUGFS_NODE_T
*
2647 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance
)
2649 return &instance
->debugfs_node
;
2653 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance
)
2655 VCHIQ_SERVICE_T
*service
;
2656 int use_count
= 0, i
;
2658 while ((service
= next_service_by_instance(instance
->state
,
2659 instance
, &i
)) != NULL
) {
2660 use_count
+= service
->service_use_count
;
2661 unlock_service(service
);
2667 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance
)
2669 return instance
->pid
;
2673 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance
)
2675 return instance
->trace
;
2679 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance
, int trace
)
2681 VCHIQ_SERVICE_T
*service
;
2684 while ((service
= next_service_by_instance(instance
->state
,
2685 instance
, &i
)) != NULL
) {
2686 service
->trace
= trace
;
2687 unlock_service(service
);
2689 instance
->trace
= (trace
!= 0);
2692 static void suspend_timer_callback(unsigned long context
)
2694 VCHIQ_STATE_T
*state
= (VCHIQ_STATE_T
*)context
;
2695 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2698 vchiq_log_info(vchiq_susp_log_level
,
2699 "%s - suspend timer expired - check suspend", __func__
);
2700 vchiq_check_suspend(state
);
2706 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle
)
2708 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
2709 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
2711 ret
= vchiq_use_internal(service
->state
, service
,
2712 USE_TYPE_SERVICE_NO_RESUME
);
2713 unlock_service(service
);
2719 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle
)
2721 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
2722 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
2724 ret
= vchiq_use_internal(service
->state
, service
,
2726 unlock_service(service
);
2732 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle
)
2734 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
2735 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
2737 ret
= vchiq_release_internal(service
->state
, service
);
2738 unlock_service(service
);
2744 vchiq_dump_service_use_state(VCHIQ_STATE_T
*state
)
2746 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2748 /* Only dump 64 services */
2749 static const int local_max_services
= 64;
2750 /* If there's more than 64 services, only dump ones with
2751 * non-zero counts */
2752 int only_nonzero
= 0;
2753 static const char *nz
= "<-- preventing suspend";
2755 enum vc_suspend_status vc_suspend_state
;
2756 enum vc_resume_status vc_resume_state
;
2759 int active_services
;
2760 struct service_data_struct
{
2764 } service_data
[local_max_services
];
2769 read_lock_bh(&arm_state
->susp_res_lock
);
2770 vc_suspend_state
= arm_state
->vc_suspend_state
;
2771 vc_resume_state
= arm_state
->vc_resume_state
;
2772 peer_count
= arm_state
->peer_use_count
;
2773 vc_use_count
= arm_state
->videocore_use_count
;
2774 active_services
= state
->unused_service
;
2775 if (active_services
> local_max_services
)
2778 for (i
= 0; (i
< active_services
) && (j
< local_max_services
); i
++) {
2779 VCHIQ_SERVICE_T
*service_ptr
= state
->services
[i
];
2783 if (only_nonzero
&& !service_ptr
->service_use_count
)
2786 if (service_ptr
->srvstate
!= VCHIQ_SRVSTATE_FREE
) {
2787 service_data
[j
].fourcc
= service_ptr
->base
.fourcc
;
2788 service_data
[j
].clientid
= service_ptr
->client_id
;
2789 service_data
[j
++].use_count
= service_ptr
->
2794 read_unlock_bh(&arm_state
->susp_res_lock
);
2796 vchiq_log_warning(vchiq_susp_log_level
,
2797 "-- Videcore suspend state: %s --",
2798 suspend_state_names
[vc_suspend_state
+ VC_SUSPEND_NUM_OFFSET
]);
2799 vchiq_log_warning(vchiq_susp_log_level
,
2800 "-- Videcore resume state: %s --",
2801 resume_state_names
[vc_resume_state
+ VC_RESUME_NUM_OFFSET
]);
2804 vchiq_log_warning(vchiq_susp_log_level
, "Too many active "
2805 "services (%d). Only dumping up to first %d services "
2806 "with non-zero use-count", active_services
,
2807 local_max_services
);
2809 for (i
= 0; i
< j
; i
++) {
2810 vchiq_log_warning(vchiq_susp_log_level
,
2811 "----- %c%c%c%c:%d service count %d %s",
2812 VCHIQ_FOURCC_AS_4CHARS(service_data
[i
].fourcc
),
2813 service_data
[i
].clientid
,
2814 service_data
[i
].use_count
,
2815 service_data
[i
].use_count
? nz
: "");
2817 vchiq_log_warning(vchiq_susp_log_level
,
2818 "----- VCHIQ use count count %d", peer_count
);
2819 vchiq_log_warning(vchiq_susp_log_level
,
2820 "--- Overall vchiq instance use count %d", vc_use_count
);
2822 vchiq_dump_platform_use_state(state
);
2826 vchiq_check_service(VCHIQ_SERVICE_T
*service
)
2828 VCHIQ_ARM_STATE_T
*arm_state
;
2829 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
2831 if (!service
|| !service
->state
)
2834 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2836 arm_state
= vchiq_platform_get_arm_state(service
->state
);
2838 read_lock_bh(&arm_state
->susp_res_lock
);
2839 if (service
->service_use_count
)
2840 ret
= VCHIQ_SUCCESS
;
2841 read_unlock_bh(&arm_state
->susp_res_lock
);
2843 if (ret
== VCHIQ_ERROR
) {
2844 vchiq_log_error(vchiq_susp_log_level
,
2845 "%s ERROR - %c%c%c%c:%d service count %d, "
2846 "state count %d, videocore suspend state %s", __func__
,
2847 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
2848 service
->client_id
, service
->service_use_count
,
2849 arm_state
->videocore_use_count
,
2850 suspend_state_names
[arm_state
->vc_suspend_state
+
2851 VC_SUSPEND_NUM_OFFSET
]);
2852 vchiq_dump_service_use_state(service
->state
);
2858 /* stub functions */
2859 void vchiq_on_remote_use_active(VCHIQ_STATE_T
*state
)
2864 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T
*state
,
2865 VCHIQ_CONNSTATE_T oldstate
, VCHIQ_CONNSTATE_T newstate
)
2867 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2868 vchiq_log_info(vchiq_susp_log_level
, "%d: %s->%s", state
->id
,
2869 get_conn_state_name(oldstate
), get_conn_state_name(newstate
));
2870 if (state
->conn_state
== VCHIQ_CONNSTATE_CONNECTED
) {
2871 write_lock_bh(&arm_state
->susp_res_lock
);
2872 if (!arm_state
->first_connect
) {
2873 char threadname
[10];
2874 arm_state
->first_connect
= 1;
2875 write_unlock_bh(&arm_state
->susp_res_lock
);
2876 snprintf(threadname
, sizeof(threadname
), "VCHIQka-%d",
2878 arm_state
->ka_thread
= kthread_create(
2879 &vchiq_keepalive_thread_func
,
2882 if (IS_ERR(arm_state
->ka_thread
)) {
2883 vchiq_log_error(vchiq_susp_log_level
,
2884 "vchiq: FATAL: couldn't create thread %s",
2887 wake_up_process(arm_state
->ka_thread
);
2890 write_unlock_bh(&arm_state
->susp_res_lock
);
2894 static int vchiq_probe(struct platform_device
*pdev
)
2896 struct device_node
*fw_node
;
2897 struct rpi_firmware
*fw
;
2901 fw_node
= of_parse_phandle(pdev
->dev
.of_node
, "firmware", 0);
2903 dev_err(&pdev
->dev
, "Missing firmware node\n");
2907 fw
= rpi_firmware_get(fw_node
);
2908 of_node_put(fw_node
);
2910 return -EPROBE_DEFER
;
2912 platform_set_drvdata(pdev
, fw
);
2914 err
= vchiq_platform_init(pdev
, &g_state
);
2916 goto failed_platform_init
;
2918 err
= alloc_chrdev_region(&vchiq_devid
, VCHIQ_MINOR
, 1, DEVICE_NAME
);
2920 vchiq_log_error(vchiq_arm_log_level
,
2921 "Unable to allocate device number");
2922 goto failed_platform_init
;
2924 cdev_init(&vchiq_cdev
, &vchiq_fops
);
2925 vchiq_cdev
.owner
= THIS_MODULE
;
2926 err
= cdev_add(&vchiq_cdev
, vchiq_devid
, 1);
2928 vchiq_log_error(vchiq_arm_log_level
,
2929 "Unable to register device");
2930 goto failed_cdev_add
;
2933 /* create sysfs entries */
2934 vchiq_class
= class_create(THIS_MODULE
, DEVICE_NAME
);
2935 ptr_err
= vchiq_class
;
2936 if (IS_ERR(ptr_err
))
2937 goto failed_class_create
;
2939 vchiq_dev
= device_create(vchiq_class
, NULL
,
2940 vchiq_devid
, NULL
, "vchiq");
2941 ptr_err
= vchiq_dev
;
2942 if (IS_ERR(ptr_err
))
2943 goto failed_device_create
;
2945 /* create debugfs entries */
2946 err
= vchiq_debugfs_init();
2948 goto failed_debugfs_init
;
2950 vchiq_log_info(vchiq_arm_log_level
,
2951 "vchiq: initialised - version %d (min %d), device %d.%d",
2952 VCHIQ_VERSION
, VCHIQ_VERSION_MIN
,
2953 MAJOR(vchiq_devid
), MINOR(vchiq_devid
));
2957 failed_debugfs_init
:
2958 device_destroy(vchiq_class
, vchiq_devid
);
2959 failed_device_create
:
2960 class_destroy(vchiq_class
);
2961 failed_class_create
:
2962 cdev_del(&vchiq_cdev
);
2963 err
= PTR_ERR(ptr_err
);
2965 unregister_chrdev_region(vchiq_devid
, 1);
2966 failed_platform_init
:
2967 vchiq_log_warning(vchiq_arm_log_level
, "could not load vchiq");
2971 static int vchiq_remove(struct platform_device
*pdev
)
2973 vchiq_debugfs_deinit();
2974 device_destroy(vchiq_class
, vchiq_devid
);
2975 class_destroy(vchiq_class
);
2976 cdev_del(&vchiq_cdev
);
2977 unregister_chrdev_region(vchiq_devid
, 1);
2982 static const struct of_device_id vchiq_of_match
[] = {
2983 { .compatible
= "brcm,bcm2835-vchiq", },
2986 MODULE_DEVICE_TABLE(of
, vchiq_of_match
);
2988 static struct platform_driver vchiq_driver
= {
2990 .name
= "bcm2835_vchiq",
2991 .of_match_table
= vchiq_of_match
,
2993 .probe
= vchiq_probe
,
2994 .remove
= vchiq_remove
,
2996 module_platform_driver(vchiq_driver
);
2998 MODULE_LICENSE("GPL");
2999 MODULE_DESCRIPTION("Videocore VCHIQ driver");
3000 MODULE_AUTHOR("Broadcom Corporation");