2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/sched/signal.h>
38 #include <linux/types.h>
39 #include <linux/errno.h>
40 #include <linux/cdev.h>
42 #include <linux/device.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/bug.h>
47 #include <linux/semaphore.h>
48 #include <linux/list.h>
50 #include <linux/platform_device.h>
51 #include <linux/compat.h>
52 #include <soc/bcm2835/raspberrypi-firmware.h>
54 #include "vchiq_core.h"
55 #include "vchiq_ioctl.h"
56 #include "vchiq_arm.h"
57 #include "vchiq_debugfs.h"
58 #include "vchiq_killable.h"
60 #define DEVICE_NAME "vchiq"
62 /* Override the default prefix, which would be vchiq_arm (from the filename) */
63 #undef MODULE_PARAM_PREFIX
64 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
68 /* Some per-instance constants */
69 #define MAX_COMPLETIONS 128
70 #define MAX_SERVICES 64
71 #define MAX_ELEMENTS 8
72 #define MSG_QUEUE_SIZE 128
74 #define KEEPALIVE_VER 1
75 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
77 /* Run time control of log level, based on KERN_XXX level. */
78 int vchiq_arm_log_level
= VCHIQ_LOG_DEFAULT
;
79 int vchiq_susp_log_level
= VCHIQ_LOG_ERROR
;
81 #define SUSPEND_TIMER_TIMEOUT_MS 100
82 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	/* NOTE(review): the two entries below were absent from the damaged
	 * source; reconstructed so that VC_SUSPEND_NUM_OFFSET (3 negative
	 * states before idle) remains consistent - verify against upstream. */
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	/* NOTE(review): first two entries reconstructed; VC_RESUME_NUM_OFFSET
	 * implies one negative state (FAILED) before idle - verify. */
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
102 /* The number of times we allow force suspend to timeout before actually
103 ** _forcing_ suspend. This is to cater for SW which fails to release vchiq
104 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
106 #define FORCE_SUSPEND_FAIL_MAX 8
108 /* The time in ms allowed for videocore to go idle when force suspend has been
110 #define FORCE_SUSPEND_TIMEOUT_MS 200
113 static void suspend_timer_callback(unsigned long context
);
116 typedef struct user_service_struct
{
117 VCHIQ_SERVICE_T
*service
;
119 VCHIQ_INSTANCE_T instance
;
121 char dequeue_pending
;
123 int message_available_pos
;
126 struct semaphore insert_event
;
127 struct semaphore remove_event
;
128 struct semaphore close_event
;
129 VCHIQ_HEADER_T
* msg_queue
[MSG_QUEUE_SIZE
];
132 struct bulk_waiter_node
{
133 struct bulk_waiter bulk_waiter
;
135 struct list_head list
;
138 struct vchiq_instance_struct
{
139 VCHIQ_STATE_T
*state
;
140 VCHIQ_COMPLETION_DATA_T completions
[MAX_COMPLETIONS
];
141 int completion_insert
;
142 int completion_remove
;
143 struct semaphore insert_event
;
144 struct semaphore remove_event
;
145 struct mutex completion_mutex
;
151 int use_close_delivered
;
154 struct list_head bulk_waiter_list
;
155 struct mutex bulk_waiter_list_mutex
;
157 VCHIQ_DEBUGFS_NODE_T debugfs_node
;
160 typedef struct dump_context_struct
{
167 static struct cdev vchiq_cdev
;
168 static dev_t vchiq_devid
;
169 static VCHIQ_STATE_T g_state
;
170 static struct class *vchiq_class
;
171 static struct device
*vchiq_dev
;
172 static DEFINE_SPINLOCK(msg_queue_spinlock
);
174 static const char *const ioctl_names
[] = {
180 "QUEUE_BULK_TRANSMIT",
181 "QUEUE_BULK_RECEIVE",
189 "SET_SERVICE_OPTION",
195 vchiq_static_assert(ARRAY_SIZE(ioctl_names
) ==
196 (VCHIQ_IOC_MAX
+ 1));
198 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
200 dump_phys_mem(void *virt_addr
, u32 num_bytes
);
203 /****************************************************************************
207 ***************************************************************************/
209 static VCHIQ_STATUS_T
210 add_completion(VCHIQ_INSTANCE_T instance
, VCHIQ_REASON_T reason
,
211 VCHIQ_HEADER_T
*header
, USER_SERVICE_T
*user_service
,
214 VCHIQ_COMPLETION_DATA_T
*completion
;
217 DEBUG_INITIALISE(g_state
.local
)
219 insert
= instance
->completion_insert
;
220 while ((insert
- instance
->completion_remove
) >= MAX_COMPLETIONS
) {
221 /* Out of space - wait for the client */
222 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
223 vchiq_log_trace(vchiq_arm_log_level
,
224 "add_completion - completion queue full");
225 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT
);
226 if (down_interruptible(&instance
->remove_event
) != 0) {
227 vchiq_log_info(vchiq_arm_log_level
,
228 "service_callback interrupted");
230 } else if (instance
->closing
) {
231 vchiq_log_info(vchiq_arm_log_level
,
232 "service_callback closing");
233 return VCHIQ_SUCCESS
;
235 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
238 completion
= &instance
->completions
[insert
& (MAX_COMPLETIONS
- 1)];
240 completion
->header
= header
;
241 completion
->reason
= reason
;
242 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
243 completion
->service_userdata
= user_service
->service
;
244 completion
->bulk_userdata
= bulk_userdata
;
246 if (reason
== VCHIQ_SERVICE_CLOSED
) {
247 /* Take an extra reference, to be held until
248 this CLOSED notification is delivered. */
249 lock_service(user_service
->service
);
250 if (instance
->use_close_delivered
)
251 user_service
->close_pending
= 1;
254 /* A write barrier is needed here to ensure that the entire completion
255 record is written out before the insert point. */
258 if (reason
== VCHIQ_MESSAGE_AVAILABLE
)
259 user_service
->message_available_pos
= insert
;
262 instance
->completion_insert
= insert
;
264 up(&instance
->insert_event
);
266 return VCHIQ_SUCCESS
;
269 /****************************************************************************
273 ***************************************************************************/
275 static VCHIQ_STATUS_T
276 service_callback(VCHIQ_REASON_T reason
, VCHIQ_HEADER_T
*header
,
277 VCHIQ_SERVICE_HANDLE_T handle
, void *bulk_userdata
)
279 /* How do we ensure the callback goes to the right client?
280 ** The service_user data points to a USER_SERVICE_T record containing
281 ** the original callback and the user state structure, which contains a
282 ** circular buffer for completion records.
284 USER_SERVICE_T
*user_service
;
285 VCHIQ_SERVICE_T
*service
;
286 VCHIQ_INSTANCE_T instance
;
287 bool skip_completion
= false;
289 DEBUG_INITIALISE(g_state
.local
)
291 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
293 service
= handle_to_service(handle
);
295 user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
296 instance
= user_service
->instance
;
298 if (!instance
|| instance
->closing
)
299 return VCHIQ_SUCCESS
;
301 vchiq_log_trace(vchiq_arm_log_level
,
302 "service_callback - service %lx(%d,%p), reason %d, header %lx, "
303 "instance %lx, bulk_userdata %lx",
304 (unsigned long)user_service
,
305 service
->localport
, user_service
->userdata
,
306 reason
, (unsigned long)header
,
307 (unsigned long)instance
, (unsigned long)bulk_userdata
);
309 if (header
&& user_service
->is_vchi
) {
310 spin_lock(&msg_queue_spinlock
);
311 while (user_service
->msg_insert
==
312 (user_service
->msg_remove
+ MSG_QUEUE_SIZE
)) {
313 spin_unlock(&msg_queue_spinlock
);
314 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
315 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT
);
316 vchiq_log_trace(vchiq_arm_log_level
,
317 "service_callback - msg queue full");
318 /* If there is no MESSAGE_AVAILABLE in the completion
321 if ((user_service
->message_available_pos
-
322 instance
->completion_remove
) < 0) {
323 VCHIQ_STATUS_T status
;
325 vchiq_log_info(vchiq_arm_log_level
,
326 "Inserting extra MESSAGE_AVAILABLE");
327 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
328 status
= add_completion(instance
, reason
,
329 NULL
, user_service
, bulk_userdata
);
330 if (status
!= VCHIQ_SUCCESS
) {
331 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
336 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
337 if (down_interruptible(&user_service
->remove_event
)
339 vchiq_log_info(vchiq_arm_log_level
,
340 "service_callback interrupted");
341 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
343 } else if (instance
->closing
) {
344 vchiq_log_info(vchiq_arm_log_level
,
345 "service_callback closing");
346 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
349 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
350 spin_lock(&msg_queue_spinlock
);
353 user_service
->msg_queue
[user_service
->msg_insert
&
354 (MSG_QUEUE_SIZE
- 1)] = header
;
355 user_service
->msg_insert
++;
357 /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
358 ** there is a MESSAGE_AVAILABLE in the completion queue then
359 ** bypass the completion queue.
361 if (((user_service
->message_available_pos
-
362 instance
->completion_remove
) >= 0) ||
363 user_service
->dequeue_pending
) {
364 user_service
->dequeue_pending
= 0;
365 skip_completion
= true;
368 spin_unlock(&msg_queue_spinlock
);
369 up(&user_service
->insert_event
);
373 DEBUG_TRACE(SERVICE_CALLBACK_LINE
);
376 return VCHIQ_SUCCESS
;
378 return add_completion(instance
, reason
, header
, user_service
,
382 /****************************************************************************
386 ***************************************************************************/
/* Destructor passed to vchiq_add_service_internal(); releases the
 * USER_SERVICE_T allocated in the CREATE_SERVICE ioctl.
 * NOTE(review): the body was missing from the damaged source; reconstructed
 * as a plain kfree - verify against upstream vchiq_arm.c. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
393 /****************************************************************************
397 ***************************************************************************/
398 static void close_delivered(USER_SERVICE_T
*user_service
)
400 vchiq_log_info(vchiq_arm_log_level
,
401 "close_delivered(handle=%x)",
402 user_service
->service
->handle
);
404 if (user_service
->close_pending
) {
405 /* Allow the underlying service to be culled */
406 unlock_service(user_service
->service
);
408 /* Wake the user-thread blocked in close_ or remove_service */
409 up(&user_service
->close_event
);
411 user_service
->close_pending
= 0;
415 struct vchiq_io_copy_callback_context
{
416 struct vchiq_element
*current_element
;
417 size_t current_element_offset
;
418 unsigned long elements_to_go
;
419 size_t current_offset
;
423 vchiq_ioc_copy_element_data(
430 size_t bytes_this_round
;
431 struct vchiq_io_copy_callback_context
*copy_context
=
432 (struct vchiq_io_copy_callback_context
*)context
;
434 if (offset
!= copy_context
->current_offset
)
437 if (!copy_context
->elements_to_go
)
441 * Complex logic here to handle the case of 0 size elements
442 * in the middle of the array of elements.
444 * Need to skip over these 0 size elements.
447 bytes_this_round
= min(copy_context
->current_element
->size
-
448 copy_context
->current_element_offset
,
451 if (bytes_this_round
)
454 copy_context
->elements_to_go
--;
455 copy_context
->current_element
++;
456 copy_context
->current_element_offset
= 0;
458 if (!copy_context
->elements_to_go
)
462 res
= copy_from_user(dest
,
463 copy_context
->current_element
->data
+
464 copy_context
->current_element_offset
,
470 copy_context
->current_element_offset
+= bytes_this_round
;
471 copy_context
->current_offset
+= bytes_this_round
;
474 * Check if done with current element, and if so advance to the next.
476 if (copy_context
->current_element_offset
==
477 copy_context
->current_element
->size
) {
478 copy_context
->elements_to_go
--;
479 copy_context
->current_element
++;
480 copy_context
->current_element_offset
= 0;
483 return bytes_this_round
;
486 /**************************************************************************
488 * vchiq_ioc_queue_message
490 **************************************************************************/
491 static VCHIQ_STATUS_T
492 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle
,
493 struct vchiq_element
*elements
,
496 struct vchiq_io_copy_callback_context context
;
498 size_t total_size
= 0;
500 context
.current_element
= elements
;
501 context
.current_element_offset
= 0;
502 context
.elements_to_go
= count
;
503 context
.current_offset
= 0;
505 for (i
= 0; i
< count
; i
++) {
506 if (!elements
[i
].data
&& elements
[i
].size
!= 0)
509 total_size
+= elements
[i
].size
;
512 return vchiq_queue_message(handle
, vchiq_ioc_copy_element_data
,
513 &context
, total_size
);
516 /****************************************************************************
520 ***************************************************************************/
522 vchiq_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
524 VCHIQ_INSTANCE_T instance
= file
->private_data
;
525 VCHIQ_STATUS_T status
= VCHIQ_SUCCESS
;
526 VCHIQ_SERVICE_T
*service
= NULL
;
530 DEBUG_INITIALISE(g_state
.local
)
532 vchiq_log_trace(vchiq_arm_log_level
,
533 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
535 ((_IOC_TYPE(cmd
) == VCHIQ_IOC_MAGIC
) &&
536 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
)) ?
537 ioctl_names
[_IOC_NR(cmd
)] : "<invalid>", arg
);
540 case VCHIQ_IOC_SHUTDOWN
:
541 if (!instance
->connected
)
544 /* Remove all services */
546 while ((service
= next_service_by_instance(instance
->state
,
547 instance
, &i
)) != NULL
) {
548 status
= vchiq_remove_service(service
->handle
);
549 unlock_service(service
);
550 if (status
!= VCHIQ_SUCCESS
)
555 if (status
== VCHIQ_SUCCESS
) {
556 /* Wake the completion thread and ask it to exit */
557 instance
->closing
= 1;
558 up(&instance
->insert_event
);
563 case VCHIQ_IOC_CONNECT
:
564 if (instance
->connected
) {
568 rc
= mutex_lock_killable(&instance
->state
->mutex
);
570 vchiq_log_error(vchiq_arm_log_level
,
571 "vchiq: connect: could not lock mutex for "
573 instance
->state
->id
, rc
);
577 status
= vchiq_connect_internal(instance
->state
, instance
);
578 mutex_unlock(&instance
->state
->mutex
);
580 if (status
== VCHIQ_SUCCESS
)
581 instance
->connected
= 1;
583 vchiq_log_error(vchiq_arm_log_level
,
584 "vchiq: could not connect: %d", status
);
587 case VCHIQ_IOC_CREATE_SERVICE
: {
588 VCHIQ_CREATE_SERVICE_T args
;
589 USER_SERVICE_T
*user_service
= NULL
;
594 (&args
, (const void __user
*)arg
,
595 sizeof(args
)) != 0) {
600 user_service
= kmalloc(sizeof(USER_SERVICE_T
), GFP_KERNEL
);
607 if (!instance
->connected
) {
612 srvstate
= VCHIQ_SRVSTATE_OPENING
;
615 instance
->connected
?
616 VCHIQ_SRVSTATE_LISTENING
:
617 VCHIQ_SRVSTATE_HIDDEN
;
620 userdata
= args
.params
.userdata
;
621 args
.params
.callback
= service_callback
;
622 args
.params
.userdata
= user_service
;
623 service
= vchiq_add_service_internal(
625 &args
.params
, srvstate
,
626 instance
, user_service_free
);
628 if (service
!= NULL
) {
629 user_service
->service
= service
;
630 user_service
->userdata
= userdata
;
631 user_service
->instance
= instance
;
632 user_service
->is_vchi
= (args
.is_vchi
!= 0);
633 user_service
->dequeue_pending
= 0;
634 user_service
->close_pending
= 0;
635 user_service
->message_available_pos
=
636 instance
->completion_remove
- 1;
637 user_service
->msg_insert
= 0;
638 user_service
->msg_remove
= 0;
639 sema_init(&user_service
->insert_event
, 0);
640 sema_init(&user_service
->remove_event
, 0);
641 sema_init(&user_service
->close_event
, 0);
644 status
= vchiq_open_service_internal
645 (service
, instance
->pid
);
646 if (status
!= VCHIQ_SUCCESS
) {
647 vchiq_remove_service(service
->handle
);
649 ret
= (status
== VCHIQ_RETRY
) ?
655 if (copy_to_user((void __user
*)
656 &(((VCHIQ_CREATE_SERVICE_T __user
*)
658 (const void *)&service
->handle
,
659 sizeof(service
->handle
)) != 0) {
661 vchiq_remove_service(service
->handle
);
671 case VCHIQ_IOC_CLOSE_SERVICE
: {
672 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
674 service
= find_service_for_instance(instance
, handle
);
675 if (service
!= NULL
) {
676 USER_SERVICE_T
*user_service
=
677 (USER_SERVICE_T
*)service
->base
.userdata
;
678 /* close_pending is false on first entry, and when the
679 wait in vchiq_close_service has been interrupted. */
680 if (!user_service
->close_pending
) {
681 status
= vchiq_close_service(service
->handle
);
682 if (status
!= VCHIQ_SUCCESS
)
686 /* close_pending is true once the underlying service
687 has been closed until the client library calls the
688 CLOSE_DELIVERED ioctl, signalling close_event. */
689 if (user_service
->close_pending
&&
690 down_interruptible(&user_service
->close_event
))
691 status
= VCHIQ_RETRY
;
697 case VCHIQ_IOC_REMOVE_SERVICE
: {
698 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
700 service
= find_service_for_instance(instance
, handle
);
701 if (service
!= NULL
) {
702 USER_SERVICE_T
*user_service
=
703 (USER_SERVICE_T
*)service
->base
.userdata
;
704 /* close_pending is false on first entry, and when the
705 wait in vchiq_close_service has been interrupted. */
706 if (!user_service
->close_pending
) {
707 status
= vchiq_remove_service(service
->handle
);
708 if (status
!= VCHIQ_SUCCESS
)
712 /* close_pending is true once the underlying service
713 has been closed until the client library calls the
714 CLOSE_DELIVERED ioctl, signalling close_event. */
715 if (user_service
->close_pending
&&
716 down_interruptible(&user_service
->close_event
))
717 status
= VCHIQ_RETRY
;
723 case VCHIQ_IOC_USE_SERVICE
:
724 case VCHIQ_IOC_RELEASE_SERVICE
: {
725 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
727 service
= find_service_for_instance(instance
, handle
);
728 if (service
!= NULL
) {
729 status
= (cmd
== VCHIQ_IOC_USE_SERVICE
) ?
730 vchiq_use_service_internal(service
) :
731 vchiq_release_service_internal(service
);
732 if (status
!= VCHIQ_SUCCESS
) {
733 vchiq_log_error(vchiq_susp_log_level
,
734 "%s: cmd %s returned error %d for "
735 "service %c%c%c%c:%03d",
737 (cmd
== VCHIQ_IOC_USE_SERVICE
) ?
738 "VCHIQ_IOC_USE_SERVICE" :
739 "VCHIQ_IOC_RELEASE_SERVICE",
741 VCHIQ_FOURCC_AS_4CHARS(
742 service
->base
.fourcc
),
750 case VCHIQ_IOC_QUEUE_MESSAGE
: {
751 VCHIQ_QUEUE_MESSAGE_T args
;
754 (&args
, (const void __user
*)arg
,
755 sizeof(args
)) != 0) {
760 service
= find_service_for_instance(instance
, args
.handle
);
762 if ((service
!= NULL
) && (args
.count
<= MAX_ELEMENTS
)) {
763 /* Copy elements into kernel space */
764 struct vchiq_element elements
[MAX_ELEMENTS
];
766 if (copy_from_user(elements
, args
.elements
,
767 args
.count
* sizeof(struct vchiq_element
)) == 0)
768 status
= vchiq_ioc_queue_message
770 elements
, args
.count
);
778 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT
:
779 case VCHIQ_IOC_QUEUE_BULK_RECEIVE
: {
780 VCHIQ_QUEUE_BULK_TRANSFER_T args
;
781 struct bulk_waiter_node
*waiter
= NULL
;
783 VCHIQ_BULK_DIR_T dir
=
784 (cmd
== VCHIQ_IOC_QUEUE_BULK_TRANSMIT
) ?
785 VCHIQ_BULK_TRANSMIT
: VCHIQ_BULK_RECEIVE
;
788 (&args
, (const void __user
*)arg
,
789 sizeof(args
)) != 0) {
794 service
= find_service_for_instance(instance
, args
.handle
);
800 if (args
.mode
== VCHIQ_BULK_MODE_BLOCKING
) {
801 waiter
= kzalloc(sizeof(struct bulk_waiter_node
),
807 args
.userdata
= &waiter
->bulk_waiter
;
808 } else if (args
.mode
== VCHIQ_BULK_MODE_WAITING
) {
809 struct list_head
*pos
;
811 mutex_lock(&instance
->bulk_waiter_list_mutex
);
812 list_for_each(pos
, &instance
->bulk_waiter_list
) {
813 if (list_entry(pos
, struct bulk_waiter_node
,
814 list
)->pid
== current
->pid
) {
815 waiter
= list_entry(pos
,
816 struct bulk_waiter_node
,
823 mutex_unlock(&instance
->bulk_waiter_list_mutex
);
825 vchiq_log_error(vchiq_arm_log_level
,
826 "no bulk_waiter found for pid %d",
831 vchiq_log_info(vchiq_arm_log_level
,
832 "found bulk_waiter %pK for pid %d", waiter
,
834 args
.userdata
= &waiter
->bulk_waiter
;
836 status
= vchiq_bulk_transfer
838 VCHI_MEM_HANDLE_INVALID
,
839 args
.data
, args
.size
,
840 args
.userdata
, args
.mode
,
844 if ((status
!= VCHIQ_RETRY
) || fatal_signal_pending(current
) ||
845 !waiter
->bulk_waiter
.bulk
) {
846 if (waiter
->bulk_waiter
.bulk
) {
847 /* Cancel the signal when the transfer
849 spin_lock(&bulk_waiter_spinlock
);
850 waiter
->bulk_waiter
.bulk
->userdata
= NULL
;
851 spin_unlock(&bulk_waiter_spinlock
);
855 const VCHIQ_BULK_MODE_T mode_waiting
=
856 VCHIQ_BULK_MODE_WAITING
;
857 waiter
->pid
= current
->pid
;
858 mutex_lock(&instance
->bulk_waiter_list_mutex
);
859 list_add(&waiter
->list
, &instance
->bulk_waiter_list
);
860 mutex_unlock(&instance
->bulk_waiter_list_mutex
);
861 vchiq_log_info(vchiq_arm_log_level
,
862 "saved bulk_waiter %pK for pid %d",
863 waiter
, current
->pid
);
865 if (copy_to_user((void __user
*)
866 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user
*)
868 (const void *)&mode_waiting
,
869 sizeof(mode_waiting
)) != 0)
874 case VCHIQ_IOC_AWAIT_COMPLETION
: {
875 VCHIQ_AWAIT_COMPLETION_T args
;
877 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
878 if (!instance
->connected
) {
883 if (copy_from_user(&args
, (const void __user
*)arg
,
884 sizeof(args
)) != 0) {
889 mutex_lock(&instance
->completion_mutex
);
891 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
892 while ((instance
->completion_remove
==
893 instance
->completion_insert
)
894 && !instance
->closing
) {
897 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
898 mutex_unlock(&instance
->completion_mutex
);
899 rc
= down_interruptible(&instance
->insert_event
);
900 mutex_lock(&instance
->completion_mutex
);
902 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
903 vchiq_log_info(vchiq_arm_log_level
,
904 "AWAIT_COMPLETION interrupted");
909 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
912 int msgbufcount
= args
.msgbufcount
;
913 int remove
= instance
->completion_remove
;
915 for (ret
= 0; ret
< args
.count
; ret
++) {
916 VCHIQ_COMPLETION_DATA_T
*completion
;
917 VCHIQ_SERVICE_T
*service
;
918 USER_SERVICE_T
*user_service
;
919 VCHIQ_HEADER_T
*header
;
921 if (remove
== instance
->completion_insert
)
924 completion
= &instance
->completions
[
925 remove
& (MAX_COMPLETIONS
- 1)];
928 * A read memory barrier is needed to stop
929 * prefetch of a stale completion record
933 service
= completion
->service_userdata
;
934 user_service
= service
->base
.userdata
;
935 completion
->service_userdata
=
936 user_service
->userdata
;
938 header
= completion
->header
;
943 msglen
= header
->size
+
944 sizeof(VCHIQ_HEADER_T
);
945 /* This must be a VCHIQ-style service */
946 if (args
.msgbufsize
< msglen
) {
949 "header %pK: msgbufsize %x < msglen %x",
950 header
, args
.msgbufsize
,
952 WARN(1, "invalid message "
958 if (msgbufcount
<= 0)
959 /* Stall here for lack of a
960 ** buffer for the message. */
962 /* Get the pointer from user space */
964 if (copy_from_user(&msgbuf
,
965 (const void __user
*)
966 &args
.msgbufs
[msgbufcount
],
967 sizeof(msgbuf
)) != 0) {
973 /* Copy the message to user space */
974 if (copy_to_user(msgbuf
, header
,
981 /* Now it has been copied, the message
982 ** can be released. */
983 vchiq_release_message(service
->handle
,
986 /* The completion must point to the
988 completion
->header
= msgbuf
;
991 if ((completion
->reason
==
992 VCHIQ_SERVICE_CLOSED
) &&
993 !instance
->use_close_delivered
)
994 unlock_service(service
);
996 if (copy_to_user((void __user
*)(
998 ret
* sizeof(VCHIQ_COMPLETION_DATA_T
)),
1000 sizeof(VCHIQ_COMPLETION_DATA_T
)) != 0) {
1007 * Ensure that the above copy has completed
1008 * before advancing the remove pointer.
1012 instance
->completion_remove
= remove
;
1015 if (msgbufcount
!= args
.msgbufcount
) {
1016 if (copy_to_user((void __user
*)
1017 &((VCHIQ_AWAIT_COMPLETION_T
*)arg
)->
1020 sizeof(msgbufcount
)) != 0) {
1027 up(&instance
->remove_event
);
1028 mutex_unlock(&instance
->completion_mutex
);
1029 DEBUG_TRACE(AWAIT_COMPLETION_LINE
);
1032 case VCHIQ_IOC_DEQUEUE_MESSAGE
: {
1033 VCHIQ_DEQUEUE_MESSAGE_T args
;
1034 USER_SERVICE_T
*user_service
;
1035 VCHIQ_HEADER_T
*header
;
1037 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1039 (&args
, (const void __user
*)arg
,
1040 sizeof(args
)) != 0) {
1044 service
= find_service_for_instance(instance
, args
.handle
);
1049 user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
1050 if (user_service
->is_vchi
== 0) {
1055 spin_lock(&msg_queue_spinlock
);
1056 if (user_service
->msg_remove
== user_service
->msg_insert
) {
1057 if (!args
.blocking
) {
1058 spin_unlock(&msg_queue_spinlock
);
1059 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1063 user_service
->dequeue_pending
= 1;
1065 spin_unlock(&msg_queue_spinlock
);
1066 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1067 if (down_interruptible(
1068 &user_service
->insert_event
) != 0) {
1069 vchiq_log_info(vchiq_arm_log_level
,
1070 "DEQUEUE_MESSAGE interrupted");
1074 spin_lock(&msg_queue_spinlock
);
1075 } while (user_service
->msg_remove
==
1076 user_service
->msg_insert
);
1082 BUG_ON((int)(user_service
->msg_insert
-
1083 user_service
->msg_remove
) < 0);
1085 header
= user_service
->msg_queue
[user_service
->msg_remove
&
1086 (MSG_QUEUE_SIZE
- 1)];
1087 user_service
->msg_remove
++;
1088 spin_unlock(&msg_queue_spinlock
);
1090 up(&user_service
->remove_event
);
1093 else if (header
->size
<= args
.bufsize
) {
1094 /* Copy to user space if msgbuf is not NULL */
1095 if ((args
.buf
== NULL
) ||
1096 (copy_to_user((void __user
*)args
.buf
,
1098 header
->size
) == 0)) {
1100 vchiq_release_message(
1106 vchiq_log_error(vchiq_arm_log_level
,
1107 "header %pK: bufsize %x < size %x",
1108 header
, args
.bufsize
, header
->size
);
1109 WARN(1, "invalid size\n");
1112 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE
);
1115 case VCHIQ_IOC_GET_CLIENT_ID
: {
1116 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
1118 ret
= vchiq_get_client_id(handle
);
1121 case VCHIQ_IOC_GET_CONFIG
: {
1122 VCHIQ_GET_CONFIG_T args
;
1123 VCHIQ_CONFIG_T config
;
1125 if (copy_from_user(&args
, (const void __user
*)arg
,
1126 sizeof(args
)) != 0) {
1130 if (args
.config_size
> sizeof(config
)) {
1134 status
= vchiq_get_config(instance
, args
.config_size
, &config
);
1135 if (status
== VCHIQ_SUCCESS
) {
1136 if (copy_to_user((void __user
*)args
.pconfig
,
1137 &config
, args
.config_size
) != 0) {
1144 case VCHIQ_IOC_SET_SERVICE_OPTION
: {
1145 VCHIQ_SET_SERVICE_OPTION_T args
;
1148 &args
, (const void __user
*)arg
,
1149 sizeof(args
)) != 0) {
1154 service
= find_service_for_instance(instance
, args
.handle
);
1160 status
= vchiq_set_service_option(
1161 args
.handle
, args
.option
, args
.value
);
1164 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1165 case VCHIQ_IOC_DUMP_PHYS_MEM
: {
1166 VCHIQ_DUMP_MEM_T args
;
1169 (&args
, (const void __user
*)arg
,
1170 sizeof(args
)) != 0) {
1174 dump_phys_mem(args
.virt_addr
, args
.num_bytes
);
1178 case VCHIQ_IOC_LIB_VERSION
: {
1179 unsigned int lib_version
= (unsigned int)arg
;
1181 if (lib_version
< VCHIQ_VERSION_MIN
)
1183 else if (lib_version
>= VCHIQ_VERSION_CLOSE_DELIVERED
)
1184 instance
->use_close_delivered
= 1;
1187 case VCHIQ_IOC_CLOSE_DELIVERED
: {
1188 VCHIQ_SERVICE_HANDLE_T handle
= (VCHIQ_SERVICE_HANDLE_T
)arg
;
1190 service
= find_closed_service_for_instance(instance
, handle
);
1191 if (service
!= NULL
) {
1192 USER_SERVICE_T
*user_service
=
1193 (USER_SERVICE_T
*)service
->base
.userdata
;
1194 close_delivered(user_service
);
1206 unlock_service(service
);
1209 if (status
== VCHIQ_ERROR
)
1211 else if (status
== VCHIQ_RETRY
)
1215 if ((status
== VCHIQ_SUCCESS
) && (ret
< 0) && (ret
!= -EINTR
) &&
1216 (ret
!= -EWOULDBLOCK
))
1217 vchiq_log_info(vchiq_arm_log_level
,
1218 " ioctl instance %lx, cmd %s -> status %d, %ld",
1219 (unsigned long)instance
,
1220 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
) ?
1221 ioctl_names
[_IOC_NR(cmd
)] :
1225 vchiq_log_trace(vchiq_arm_log_level
,
1226 " ioctl instance %lx, cmd %s -> status %d, %ld",
1227 (unsigned long)instance
,
1228 (_IOC_NR(cmd
) <= VCHIQ_IOC_MAX
) ?
1229 ioctl_names
[_IOC_NR(cmd
)] :
1236 #if defined(CONFIG_COMPAT)
1238 struct vchiq_service_params32
{
1240 compat_uptr_t callback
;
1241 compat_uptr_t userdata
;
1242 short version
; /* Increment for non-trivial changes */
1243 short version_min
; /* Update for incompatible changes */
1246 struct vchiq_create_service32
{
1247 struct vchiq_service_params32 params
;
1250 unsigned int handle
; /* OUT */
1253 #define VCHIQ_IOC_CREATE_SERVICE32 \
1254 _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1257 vchiq_compat_ioctl_create_service(
1262 VCHIQ_CREATE_SERVICE_T __user
*args
;
1263 struct vchiq_create_service32 __user
*ptrargs32
=
1264 (struct vchiq_create_service32 __user
*)arg
;
1265 struct vchiq_create_service32 args32
;
1268 args
= compat_alloc_user_space(sizeof(*args
));
1272 if (copy_from_user(&args32
,
1273 (struct vchiq_create_service32 __user
*)arg
,
1277 if (put_user(args32
.params
.fourcc
, &args
->params
.fourcc
) ||
1278 put_user(compat_ptr(args32
.params
.callback
),
1279 &args
->params
.callback
) ||
1280 put_user(compat_ptr(args32
.params
.userdata
),
1281 &args
->params
.userdata
) ||
1282 put_user(args32
.params
.version
, &args
->params
.version
) ||
1283 put_user(args32
.params
.version_min
,
1284 &args
->params
.version_min
) ||
1285 put_user(args32
.is_open
, &args
->is_open
) ||
1286 put_user(args32
.is_vchi
, &args
->is_vchi
) ||
1287 put_user(args32
.handle
, &args
->handle
))
1290 ret
= vchiq_ioctl(file
, VCHIQ_IOC_CREATE_SERVICE
, (unsigned long)args
);
1295 if (get_user(args32
.handle
, &args
->handle
))
1298 if (copy_to_user(&ptrargs32
->handle
,
1300 sizeof(args32
.handle
)))
1306 struct vchiq_element32
{
1311 struct vchiq_queue_message32
{
1312 unsigned int handle
;
1314 compat_uptr_t elements
;
1317 #define VCHIQ_IOC_QUEUE_MESSAGE32 \
1318 _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
1321 vchiq_compat_ioctl_queue_message(struct file
*file
,
1325 VCHIQ_QUEUE_MESSAGE_T
*args
;
1326 struct vchiq_element
*elements
;
1327 struct vchiq_queue_message32 args32
;
1330 if (copy_from_user(&args32
,
1331 (struct vchiq_queue_message32 __user
*)arg
,
1335 args
= compat_alloc_user_space(sizeof(*args
) +
1336 (sizeof(*elements
) * MAX_ELEMENTS
));
1341 if (put_user(args32
.handle
, &args
->handle
) ||
1342 put_user(args32
.count
, &args
->count
) ||
1343 put_user(compat_ptr(args32
.elements
), &args
->elements
))
1346 if (args32
.count
> MAX_ELEMENTS
)
1349 if (args32
.elements
&& args32
.count
) {
1350 struct vchiq_element32 tempelement32
[MAX_ELEMENTS
];
1352 elements
= (struct vchiq_element __user
*)(args
+ 1);
1354 if (copy_from_user(&tempelement32
,
1355 compat_ptr(args32
.elements
),
1356 sizeof(tempelement32
)))
1359 for (count
= 0; count
< args32
.count
; count
++) {
1360 if (put_user(compat_ptr(tempelement32
[count
].data
),
1361 &elements
[count
].data
) ||
1362 put_user(tempelement32
[count
].size
,
1363 &elements
[count
].size
))
1367 if (put_user(elements
, &args
->elements
))
1371 return vchiq_ioctl(file
, VCHIQ_IOC_QUEUE_MESSAGE
, (unsigned long)args
);
1374 struct vchiq_queue_bulk_transfer32
{
1375 unsigned int handle
;
1378 compat_uptr_t userdata
;
1379 VCHIQ_BULK_MODE_T mode
;
1382 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1383 _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1384 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1385 _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1388 vchiq_compat_ioctl_queue_bulk(struct file
*file
,
1392 VCHIQ_QUEUE_BULK_TRANSFER_T
*args
;
1393 struct vchiq_queue_bulk_transfer32 args32
;
1394 struct vchiq_queue_bulk_transfer32
*ptrargs32
=
1395 (struct vchiq_queue_bulk_transfer32
*)arg
;
1398 args
= compat_alloc_user_space(sizeof(*args
));
1402 if (copy_from_user(&args32
,
1403 (struct vchiq_queue_bulk_transfer32 __user
*)arg
,
1407 if (put_user(args32
.handle
, &args
->handle
) ||
1408 put_user(compat_ptr(args32
.data
), &args
->data
) ||
1409 put_user(args32
.size
, &args
->size
) ||
1410 put_user(compat_ptr(args32
.userdata
), &args
->userdata
) ||
1411 put_user(args32
.mode
, &args
->mode
))
1414 if (cmd
== VCHIQ_IOC_QUEUE_BULK_TRANSMIT32
)
1415 cmd
= VCHIQ_IOC_QUEUE_BULK_TRANSMIT
;
1417 cmd
= VCHIQ_IOC_QUEUE_BULK_RECEIVE
;
1419 ret
= vchiq_ioctl(file
, cmd
, (unsigned long)args
);
1424 if (get_user(args32
.mode
, &args
->mode
))
1427 if (copy_to_user(&ptrargs32
->mode
,
1429 sizeof(args32
.mode
)))
1435 struct vchiq_completion_data32
{
1436 VCHIQ_REASON_T reason
;
1437 compat_uptr_t header
;
1438 compat_uptr_t service_userdata
;
1439 compat_uptr_t bulk_userdata
;
1442 struct vchiq_await_completion32
{
1445 unsigned int msgbufsize
;
1446 unsigned int msgbufcount
; /* IN/OUT */
1447 compat_uptr_t msgbufs
;
1450 #define VCHIQ_IOC_AWAIT_COMPLETION32 \
1451 _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1454 vchiq_compat_ioctl_await_completion(struct file
*file
,
1458 VCHIQ_AWAIT_COMPLETION_T
*args
;
1459 VCHIQ_COMPLETION_DATA_T
*completion
;
1460 VCHIQ_COMPLETION_DATA_T completiontemp
;
1461 struct vchiq_await_completion32 args32
;
1462 struct vchiq_completion_data32 completion32
;
1463 unsigned int *msgbufcount32
;
1464 compat_uptr_t msgbuf32
;
1469 args
= compat_alloc_user_space(sizeof(*args
) +
1470 sizeof(*completion
) +
1471 sizeof(*msgbufptr
));
1475 completion
= (VCHIQ_COMPLETION_DATA_T
*)(args
+ 1);
1476 msgbufptr
= (void __user
**)(completion
+ 1);
1478 if (copy_from_user(&args32
,
1479 (struct vchiq_completion_data32
*)arg
,
1483 if (put_user(args32
.count
, &args
->count
) ||
1484 put_user(compat_ptr(args32
.buf
), &args
->buf
) ||
1485 put_user(args32
.msgbufsize
, &args
->msgbufsize
) ||
1486 put_user(args32
.msgbufcount
, &args
->msgbufcount
) ||
1487 put_user(compat_ptr(args32
.msgbufs
), &args
->msgbufs
))
1490 /* These are simple cases, so just fall into the native handler */
1491 if (!args32
.count
|| !args32
.buf
|| !args32
.msgbufcount
)
1492 return vchiq_ioctl(file
,
1493 VCHIQ_IOC_AWAIT_COMPLETION
,
1494 (unsigned long)args
);
1497 * These are the more complex cases. Typical applications of this
1498 * ioctl will use a very large count, with a very large msgbufcount.
1499 * Since the native ioctl can asynchronously fill in the returned
1500 * buffers and the application can in theory begin processing messages
1501 * even before the ioctl returns, a bit of a trick is used here.
1503 * By forcing both count and msgbufcount to be 1, it forces the native
1504 * ioctl to only claim at most 1 message is available. This tricks
1505 * the calling application into thinking only 1 message was actually
1506 * available in the queue so like all good applications it will retry
1507 * waiting until all the required messages are received.
1509 * This trick has been tested and proven to work with vchiq_test,
1510 * Minecraft_PI, the "hello pi" examples, and various other
1511 * applications that are included in Raspbian.
1514 if (copy_from_user(&msgbuf32
,
1515 compat_ptr(args32
.msgbufs
) +
1516 (sizeof(compat_uptr_t
) *
1517 (args32
.msgbufcount
- 1)),
1521 msgbuf
= compat_ptr(msgbuf32
);
1523 if (copy_to_user(msgbufptr
,
1528 if (copy_to_user(&args
->msgbufs
,
1533 if (put_user(1U, &args
->count
) ||
1534 put_user(completion
, &args
->buf
) ||
1535 put_user(1U, &args
->msgbufcount
))
1538 ret
= vchiq_ioctl(file
,
1539 VCHIQ_IOC_AWAIT_COMPLETION
,
1540 (unsigned long)args
);
1543 * An return value of 0 here means that no messages where available
1544 * in the message queue. In this case the native ioctl does not
1545 * return any data to the application at all. Not even to update
1546 * msgbufcount. This functionality needs to be kept here for
1549 * Of course, < 0 means that an error occurred and no data is being
1552 * Since count and msgbufcount was forced to 1, that means
1553 * the only other possible return value is 1. Meaning that 1 message
1554 * was available, so that multiple message case does not need to be
1560 if (copy_from_user(&completiontemp
, completion
, sizeof(*completion
)))
1563 completion32
.reason
= completiontemp
.reason
;
1564 completion32
.header
= ptr_to_compat(completiontemp
.header
);
1565 completion32
.service_userdata
=
1566 ptr_to_compat(completiontemp
.service_userdata
);
1567 completion32
.bulk_userdata
=
1568 ptr_to_compat(completiontemp
.bulk_userdata
);
1570 if (copy_to_user(compat_ptr(args32
.buf
),
1572 sizeof(completion32
)))
1575 args32
.msgbufcount
--;
1578 &((struct vchiq_await_completion32 __user
*)arg
)->msgbufcount
;
1580 if (copy_to_user(msgbufcount32
,
1581 &args32
.msgbufcount
,
1582 sizeof(args32
.msgbufcount
)))
1588 struct vchiq_dequeue_message32
{
1589 unsigned int handle
;
1591 unsigned int bufsize
;
1595 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1596 _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1599 vchiq_compat_ioctl_dequeue_message(struct file
*file
,
1603 VCHIQ_DEQUEUE_MESSAGE_T
*args
;
1604 struct vchiq_dequeue_message32 args32
;
1606 args
= compat_alloc_user_space(sizeof(*args
));
1610 if (copy_from_user(&args32
,
1611 (struct vchiq_dequeue_message32
*)arg
,
1615 if (put_user(args32
.handle
, &args
->handle
) ||
1616 put_user(args32
.blocking
, &args
->blocking
) ||
1617 put_user(args32
.bufsize
, &args
->bufsize
) ||
1618 put_user(compat_ptr(args32
.buf
), &args
->buf
))
1621 return vchiq_ioctl(file
, VCHIQ_IOC_DEQUEUE_MESSAGE
,
1622 (unsigned long)args
);
1625 struct vchiq_get_config32
{
1626 unsigned int config_size
;
1627 compat_uptr_t pconfig
;
1630 #define VCHIQ_IOC_GET_CONFIG32 \
1631 _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1634 vchiq_compat_ioctl_get_config(struct file
*file
,
1638 VCHIQ_GET_CONFIG_T
*args
;
1639 struct vchiq_get_config32 args32
;
1641 args
= compat_alloc_user_space(sizeof(*args
));
1645 if (copy_from_user(&args32
,
1646 (struct vchiq_get_config32
*)arg
,
1650 if (put_user(args32
.config_size
, &args
->config_size
) ||
1651 put_user(compat_ptr(args32
.pconfig
), &args
->pconfig
))
1654 return vchiq_ioctl(file
, VCHIQ_IOC_GET_CONFIG
, (unsigned long)args
);
1657 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1659 struct vchiq_dump_mem32
{
1660 compat_uptr_t virt_addr
;
1664 #define VCHIQ_IOC_DUMP_PHYS_MEM32 \
1665 _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem32)
1668 vchiq_compat_ioctl_dump_phys_mem(struct file
*file
,
1672 VCHIQ_DUMP_MEM_T
*args
;
1673 struct vchiq_dump_mem32 args32
;
1675 args
= compat_alloc_user_space(sizeof(*args
));
1679 if (copy_from_user(&args32
,
1680 (struct vchiq_dump_mem32
*)arg
,
1684 if (put_user(compat_ptr(args32
.virt_addr
), &args
->virt_addr
) ||
1685 put_user(args32
.num_bytes
, &args
->num_bytes
))
1688 return vchiq_ioctl(file
, VCHIQ_IOC_DUMP_PHYS_MEM
, (unsigned long)args
);
1694 vchiq_compat_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
1697 case VCHIQ_IOC_CREATE_SERVICE32
:
1698 return vchiq_compat_ioctl_create_service(file
, cmd
, arg
);
1699 case VCHIQ_IOC_QUEUE_MESSAGE32
:
1700 return vchiq_compat_ioctl_queue_message(file
, cmd
, arg
);
1701 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32
:
1702 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32
:
1703 return vchiq_compat_ioctl_queue_bulk(file
, cmd
, arg
);
1704 case VCHIQ_IOC_AWAIT_COMPLETION32
:
1705 return vchiq_compat_ioctl_await_completion(file
, cmd
, arg
);
1706 case VCHIQ_IOC_DEQUEUE_MESSAGE32
:
1707 return vchiq_compat_ioctl_dequeue_message(file
, cmd
, arg
);
1708 case VCHIQ_IOC_GET_CONFIG32
:
1709 return vchiq_compat_ioctl_get_config(file
, cmd
, arg
);
1710 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1711 case VCHIQ_IOC_DUMP_PHYS_MEM32
:
1712 return vchiq_compat_ioctl_dump_phys_mem(file
, cmd
, arg
);
1715 return vchiq_ioctl(file
, cmd
, arg
);
1721 /****************************************************************************
1725 ***************************************************************************/
1728 vchiq_open(struct inode
*inode
, struct file
*file
)
1730 int dev
= iminor(inode
) & 0x0f;
1732 vchiq_log_info(vchiq_arm_log_level
, "vchiq_open");
1736 VCHIQ_STATE_T
*state
= vchiq_get_state();
1737 VCHIQ_INSTANCE_T instance
;
1740 vchiq_log_error(vchiq_arm_log_level
,
1741 "vchiq has no connection to VideoCore");
1745 instance
= kzalloc(sizeof(*instance
), GFP_KERNEL
);
1749 instance
->state
= state
;
1750 instance
->pid
= current
->tgid
;
1752 ret
= vchiq_debugfs_add_instance(instance
);
1758 sema_init(&instance
->insert_event
, 0);
1759 sema_init(&instance
->remove_event
, 0);
1760 mutex_init(&instance
->completion_mutex
);
1761 mutex_init(&instance
->bulk_waiter_list_mutex
);
1762 INIT_LIST_HEAD(&instance
->bulk_waiter_list
);
1764 file
->private_data
= instance
;
1768 vchiq_log_error(vchiq_arm_log_level
,
1769 "Unknown minor device: %d", dev
);
1776 /****************************************************************************
1780 ***************************************************************************/
1783 vchiq_release(struct inode
*inode
, struct file
*file
)
1785 int dev
= iminor(inode
) & 0x0f;
1790 VCHIQ_INSTANCE_T instance
= file
->private_data
;
1791 VCHIQ_STATE_T
*state
= vchiq_get_state();
1792 VCHIQ_SERVICE_T
*service
;
1795 vchiq_log_info(vchiq_arm_log_level
,
1796 "vchiq_release: instance=%lx",
1797 (unsigned long)instance
);
1804 /* Ensure videocore is awake to allow termination. */
1805 vchiq_use_internal(instance
->state
, NULL
,
1808 mutex_lock(&instance
->completion_mutex
);
1810 /* Wake the completion thread and ask it to exit */
1811 instance
->closing
= 1;
1812 up(&instance
->insert_event
);
1814 mutex_unlock(&instance
->completion_mutex
);
1816 /* Wake the slot handler if the completion queue is full. */
1817 up(&instance
->remove_event
);
1819 /* Mark all services for termination... */
1821 while ((service
= next_service_by_instance(state
, instance
,
1823 USER_SERVICE_T
*user_service
= service
->base
.userdata
;
1825 /* Wake the slot handler if the msg queue is full. */
1826 up(&user_service
->remove_event
);
1828 vchiq_terminate_service_internal(service
);
1829 unlock_service(service
);
1832 /* ...and wait for them to die */
1834 while ((service
= next_service_by_instance(state
, instance
, &i
))
1836 USER_SERVICE_T
*user_service
= service
->base
.userdata
;
1838 down(&service
->remove_event
);
1840 BUG_ON(service
->srvstate
!= VCHIQ_SRVSTATE_FREE
);
1842 spin_lock(&msg_queue_spinlock
);
1844 while (user_service
->msg_remove
!=
1845 user_service
->msg_insert
) {
1846 VCHIQ_HEADER_T
*header
= user_service
->
1847 msg_queue
[user_service
->msg_remove
&
1848 (MSG_QUEUE_SIZE
- 1)];
1849 user_service
->msg_remove
++;
1850 spin_unlock(&msg_queue_spinlock
);
1853 vchiq_release_message(
1856 spin_lock(&msg_queue_spinlock
);
1859 spin_unlock(&msg_queue_spinlock
);
1861 unlock_service(service
);
1864 /* Release any closed services */
1865 while (instance
->completion_remove
!=
1866 instance
->completion_insert
) {
1867 VCHIQ_COMPLETION_DATA_T
*completion
;
1868 VCHIQ_SERVICE_T
*service
;
1870 completion
= &instance
->completions
[
1871 instance
->completion_remove
&
1872 (MAX_COMPLETIONS
- 1)];
1873 service
= completion
->service_userdata
;
1874 if (completion
->reason
== VCHIQ_SERVICE_CLOSED
)
1876 USER_SERVICE_T
*user_service
=
1877 service
->base
.userdata
;
1879 /* Wake any blocked user-thread */
1880 if (instance
->use_close_delivered
)
1881 up(&user_service
->close_event
);
1882 unlock_service(service
);
1884 instance
->completion_remove
++;
1887 /* Release the PEER service count. */
1888 vchiq_release_internal(instance
->state
, NULL
);
1891 struct list_head
*pos
, *next
;
1893 list_for_each_safe(pos
, next
,
1894 &instance
->bulk_waiter_list
) {
1895 struct bulk_waiter_node
*waiter
;
1897 waiter
= list_entry(pos
,
1898 struct bulk_waiter_node
,
1901 vchiq_log_info(vchiq_arm_log_level
,
1902 "bulk_waiter - cleaned up %pK for pid %d",
1903 waiter
, waiter
->pid
);
1908 vchiq_debugfs_remove_instance(instance
);
1911 file
->private_data
= NULL
;
1915 vchiq_log_error(vchiq_arm_log_level
,
1916 "Unknown minor device: %d", dev
);
1924 /****************************************************************************
1928 ***************************************************************************/
1931 vchiq_dump(void *dump_context
, const char *str
, int len
)
1933 DUMP_CONTEXT_T
*context
= (DUMP_CONTEXT_T
*)dump_context
;
1935 if (context
->actual
< context
->space
) {
1938 if (context
->offset
> 0) {
1939 int skip_bytes
= min(len
, (int)context
->offset
);
1943 context
->offset
-= skip_bytes
;
1944 if (context
->offset
> 0)
1947 copy_bytes
= min(len
, (int)(context
->space
- context
->actual
));
1948 if (copy_bytes
== 0)
1950 if (copy_to_user(context
->buf
+ context
->actual
, str
,
1952 context
->actual
= -EFAULT
;
1953 context
->actual
+= copy_bytes
;
1956 /* If tne terminating NUL is included in the length, then it
1957 ** marks the end of a line and should be replaced with a
1958 ** carriage return. */
1959 if ((len
== 0) && (str
[copy_bytes
- 1] == '\0')) {
1962 if (copy_to_user(context
->buf
+ context
->actual
- 1,
1964 context
->actual
= -EFAULT
;
1969 /****************************************************************************
1971 * vchiq_dump_platform_instance_state
1973 ***************************************************************************/
1976 vchiq_dump_platform_instances(void *dump_context
)
1978 VCHIQ_STATE_T
*state
= vchiq_get_state();
1983 /* There is no list of instances, so instead scan all services,
1984 marking those that have been dumped. */
1986 for (i
= 0; i
< state
->unused_service
; i
++) {
1987 VCHIQ_SERVICE_T
*service
= state
->services
[i
];
1988 VCHIQ_INSTANCE_T instance
;
1990 if (service
&& (service
->base
.callback
== service_callback
)) {
1991 instance
= service
->instance
;
1997 for (i
= 0; i
< state
->unused_service
; i
++) {
1998 VCHIQ_SERVICE_T
*service
= state
->services
[i
];
1999 VCHIQ_INSTANCE_T instance
;
2001 if (service
&& (service
->base
.callback
== service_callback
)) {
2002 instance
= service
->instance
;
2003 if (instance
&& !instance
->mark
) {
2004 len
= snprintf(buf
, sizeof(buf
),
2005 "Instance %pK: pid %d,%s completions %d/%d",
2006 instance
, instance
->pid
,
2007 instance
->connected
? " connected, " :
2009 instance
->completion_insert
-
2010 instance
->completion_remove
,
2013 vchiq_dump(dump_context
, buf
, len
+ 1);
2021 /****************************************************************************
2023 * vchiq_dump_platform_service_state
2025 ***************************************************************************/
2028 vchiq_dump_platform_service_state(void *dump_context
, VCHIQ_SERVICE_T
*service
)
2030 USER_SERVICE_T
*user_service
= (USER_SERVICE_T
*)service
->base
.userdata
;
2034 len
= snprintf(buf
, sizeof(buf
), " instance %pK", service
->instance
);
2036 if ((service
->base
.callback
== service_callback
) &&
2037 user_service
->is_vchi
) {
2038 len
+= snprintf(buf
+ len
, sizeof(buf
) - len
,
2040 user_service
->msg_insert
- user_service
->msg_remove
,
2043 if (user_service
->dequeue_pending
)
2044 len
+= snprintf(buf
+ len
, sizeof(buf
) - len
,
2045 " (dequeue pending)");
2048 vchiq_dump(dump_context
, buf
, len
+ 1);
2051 /****************************************************************************
2055 ***************************************************************************/
2057 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
2060 dump_phys_mem(void *virt_addr
, u32 num_bytes
)
2063 u8
*end_virt_addr
= virt_addr
+ num_bytes
;
2070 struct page
**pages
;
2071 u8
*kmapped_virt_ptr
;
2073 /* Align virt_addr and end_virt_addr to 16 byte boundaries. */
2075 virt_addr
= (void *)((unsigned long)virt_addr
& ~0x0fuL
);
2076 end_virt_addr
= (void *)(((unsigned long)end_virt_addr
+ 15uL) &
2079 offset
= (int)(long)virt_addr
& (PAGE_SIZE
- 1);
2080 end_offset
= (int)(long)end_virt_addr
& (PAGE_SIZE
- 1);
2082 num_pages
= DIV_ROUND_UP(offset
+ num_bytes
, PAGE_SIZE
);
2084 pages
= kmalloc(sizeof(struct page
*) * num_pages
, GFP_KERNEL
);
2086 vchiq_log_error(vchiq_arm_log_level
,
2087 "Unable to allocation memory for %d pages\n",
2092 down_read(¤t
->mm
->mmap_sem
);
2093 rc
= get_user_pages(
2094 (unsigned long)virt_addr
, /* start */
2095 num_pages
, /* len */
2097 pages
, /* pages (array of page pointers) */
2099 up_read(¤t
->mm
->mmap_sem
);
2105 vchiq_log_error(vchiq_arm_log_level
,
2106 "Failed to get user pages: %d\n", rc
);
2110 while (offset
< end_offset
) {
2111 int page_offset
= offset
% PAGE_SIZE
;
2113 page_idx
= offset
/ PAGE_SIZE
;
2114 if (page_idx
!= prev_idx
) {
2117 page
= pages
[page_idx
];
2118 kmapped_virt_ptr
= kmap(page
);
2119 prev_idx
= page_idx
;
2122 if (vchiq_arm_log_level
>= VCHIQ_LOG_TRACE
)
2123 vchiq_log_dump_mem("ph",
2124 (u32
)(unsigned long)&kmapped_virt_ptr
[
2126 &kmapped_virt_ptr
[page_offset
], 16);
2135 for (page_idx
= 0; page_idx
< num_pages
; page_idx
++)
2136 put_page(pages
[page_idx
]);
2143 /****************************************************************************
2147 ***************************************************************************/
2150 vchiq_read(struct file
*file
, char __user
*buf
,
2151 size_t count
, loff_t
*ppos
)
2153 DUMP_CONTEXT_T context
;
2157 context
.space
= count
;
2158 context
.offset
= *ppos
;
2160 vchiq_dump_state(&context
, &g_state
);
2162 *ppos
+= context
.actual
;
2164 return context
.actual
;
2168 vchiq_get_state(void)
2171 if (g_state
.remote
== NULL
)
2172 printk(KERN_ERR
"%s: g_state.remote == NULL\n", __func__
);
2173 else if (g_state
.remote
->initialised
!= 1)
2174 printk(KERN_NOTICE
"%s: g_state.remote->initialised != 1 (%d)\n",
2175 __func__
, g_state
.remote
->initialised
);
2177 return ((g_state
.remote
!= NULL
) &&
2178 (g_state
.remote
->initialised
== 1)) ? &g_state
: NULL
;
2181 static const struct file_operations
2183 .owner
= THIS_MODULE
,
2184 .unlocked_ioctl
= vchiq_ioctl
,
2185 #if defined(CONFIG_COMPAT)
2186 .compat_ioctl
= vchiq_compat_ioctl
,
2189 .release
= vchiq_release
,
2194 * Autosuspend related functionality
2198 vchiq_videocore_wanted(VCHIQ_STATE_T
*state
)
2200 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2203 /* autosuspend not supported - always return wanted */
2205 else if (arm_state
->blocked_count
)
2207 else if (!arm_state
->videocore_use_count
)
2208 /* usage count zero - check for override unless we're forcing */
2209 if (arm_state
->resume_blocked
)
2212 return vchiq_platform_videocore_wanted(state
);
2214 /* non-zero usage count - videocore still required */
2218 static VCHIQ_STATUS_T
2219 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason
,
2220 VCHIQ_HEADER_T
*header
,
2221 VCHIQ_SERVICE_HANDLE_T service_user
,
2224 vchiq_log_error(vchiq_susp_log_level
,
2225 "%s callback reason %d", __func__
, reason
);
2230 vchiq_keepalive_thread_func(void *v
)
2232 VCHIQ_STATE_T
*state
= (VCHIQ_STATE_T
*) v
;
2233 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2235 VCHIQ_STATUS_T status
;
2236 VCHIQ_INSTANCE_T instance
;
2237 VCHIQ_SERVICE_HANDLE_T ka_handle
;
2239 VCHIQ_SERVICE_PARAMS_T params
= {
2240 .fourcc
= VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2241 .callback
= vchiq_keepalive_vchiq_callback
,
2242 .version
= KEEPALIVE_VER
,
2243 .version_min
= KEEPALIVE_VER_MIN
2246 status
= vchiq_initialise(&instance
);
2247 if (status
!= VCHIQ_SUCCESS
) {
2248 vchiq_log_error(vchiq_susp_log_level
,
2249 "%s vchiq_initialise failed %d", __func__
, status
);
2253 status
= vchiq_connect(instance
);
2254 if (status
!= VCHIQ_SUCCESS
) {
2255 vchiq_log_error(vchiq_susp_log_level
,
2256 "%s vchiq_connect failed %d", __func__
, status
);
2260 status
= vchiq_add_service(instance
, ¶ms
, &ka_handle
);
2261 if (status
!= VCHIQ_SUCCESS
) {
2262 vchiq_log_error(vchiq_susp_log_level
,
2263 "%s vchiq_open_service failed %d", __func__
, status
);
2268 long rc
= 0, uc
= 0;
2270 if (wait_for_completion_interruptible(&arm_state
->ka_evt
)
2272 vchiq_log_error(vchiq_susp_log_level
,
2273 "%s interrupted", __func__
);
2274 flush_signals(current
);
2278 /* read and clear counters. Do release_count then use_count to
2279 * prevent getting more releases than uses */
2280 rc
= atomic_xchg(&arm_state
->ka_release_count
, 0);
2281 uc
= atomic_xchg(&arm_state
->ka_use_count
, 0);
2283 /* Call use/release service the requisite number of times.
2284 * Process use before release so use counts don't go negative */
2286 atomic_inc(&arm_state
->ka_use_ack_count
);
2287 status
= vchiq_use_service(ka_handle
);
2288 if (status
!= VCHIQ_SUCCESS
) {
2289 vchiq_log_error(vchiq_susp_log_level
,
2290 "%s vchiq_use_service error %d",
2295 status
= vchiq_release_service(ka_handle
);
2296 if (status
!= VCHIQ_SUCCESS
) {
2297 vchiq_log_error(vchiq_susp_log_level
,
2298 "%s vchiq_release_service error %d",
2305 vchiq_shutdown(instance
);
2313 vchiq_arm_init_state(VCHIQ_STATE_T
*state
, VCHIQ_ARM_STATE_T
*arm_state
)
2316 rwlock_init(&arm_state
->susp_res_lock
);
2318 init_completion(&arm_state
->ka_evt
);
2319 atomic_set(&arm_state
->ka_use_count
, 0);
2320 atomic_set(&arm_state
->ka_use_ack_count
, 0);
2321 atomic_set(&arm_state
->ka_release_count
, 0);
2323 init_completion(&arm_state
->vc_suspend_complete
);
2325 init_completion(&arm_state
->vc_resume_complete
);
2326 /* Initialise to 'done' state. We only want to block on resume
2327 * completion while videocore is suspended. */
2328 set_resume_state(arm_state
, VC_RESUME_RESUMED
);
2330 init_completion(&arm_state
->resume_blocker
);
2331 /* Initialise to 'done' state. We only want to block on this
2332 * completion while resume is blocked */
2333 complete_all(&arm_state
->resume_blocker
);
2335 init_completion(&arm_state
->blocked_blocker
);
2336 /* Initialise to 'done' state. We only want to block on this
2337 * completion while things are waiting on the resume blocker */
2338 complete_all(&arm_state
->blocked_blocker
);
2340 arm_state
->suspend_timer_timeout
= SUSPEND_TIMER_TIMEOUT_MS
;
2341 arm_state
->suspend_timer_running
= 0;
2342 setup_timer(&arm_state
->suspend_timer
, suspend_timer_callback
,
2343 (unsigned long)(state
));
2345 arm_state
->first_connect
= 0;
2348 return VCHIQ_SUCCESS
;
2352 ** Functions to modify the state variables;
2353 ** set_suspend_state
2356 ** There are more state variables than we might like, so ensure they remain in
2357 ** step. Suspend and resume state are maintained separately, since most of
2358 ** these state machines can operate independently. However, there are a few
2359 ** states where state transitions in one state machine cause a reset to the
2360 ** other state machine. In addition, there are some completion events which
2361 ** need to occur on state machine reset and end-state(s), so these are also
2362 ** dealt with in these functions.
2364 ** In all states we set the state variable according to the input, but in some
2365 ** cases we perform additional steps outlined below;
2367 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2368 ** The suspend completion is completed after any suspend
2369 ** attempt. When we reset the state machine we also reset
2370 ** the completion. This reset occurs when videocore is
2371 ** resumed, and also if we initiate suspend after a suspend
2374 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2375 ** suspend - ie from this point on we must try to suspend
2376 ** before resuming can occur. We therefore also reset the
2377 ** resume state machine to VC_RESUME_IDLE in this state.
2379 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2380 ** complete_all on the suspend completion to notify
2381 ** anything waiting for suspend to happen.
2383 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2384 ** initiate resume, so no need to alter resume state.
2385 ** We call complete_all on the suspend completion to notify
2386 ** of suspend rejection.
2388 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
2389 ** suspend completion and reset the resume state machine.
2391 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
2392 ** resume completion is in it's 'done' state whenever
2393 ** videcore is running. Therefore, the VC_RESUME_IDLE
2394 ** state implies that videocore is suspended.
2395 ** Hence, any thread which needs to wait until videocore is
2396 ** running can wait on this completion - it will only block
2397 ** if videocore is suspended.
2399 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
2400 ** Call complete_all on the resume completion to unblock
2401 ** any threads waiting for resume. Also reset the suspend
2402 ** state machine to it's idle state.
2404 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2408 set_suspend_state(VCHIQ_ARM_STATE_T
*arm_state
,
2409 enum vc_suspend_status new_state
)
2411 /* set the state in all cases */
2412 arm_state
->vc_suspend_state
= new_state
;
2414 /* state specific additional actions */
2415 switch (new_state
) {
2416 case VC_SUSPEND_FORCE_CANCELED
:
2417 complete_all(&arm_state
->vc_suspend_complete
);
2419 case VC_SUSPEND_REJECTED
:
2420 complete_all(&arm_state
->vc_suspend_complete
);
2422 case VC_SUSPEND_FAILED
:
2423 complete_all(&arm_state
->vc_suspend_complete
);
2424 arm_state
->vc_resume_state
= VC_RESUME_RESUMED
;
2425 complete_all(&arm_state
->vc_resume_complete
);
2427 case VC_SUSPEND_IDLE
:
2428 reinit_completion(&arm_state
->vc_suspend_complete
);
2430 case VC_SUSPEND_REQUESTED
:
2432 case VC_SUSPEND_IN_PROGRESS
:
2433 set_resume_state(arm_state
, VC_RESUME_IDLE
);
2435 case VC_SUSPEND_SUSPENDED
:
2436 complete_all(&arm_state
->vc_suspend_complete
);
2445 set_resume_state(VCHIQ_ARM_STATE_T
*arm_state
,
2446 enum vc_resume_status new_state
)
2448 /* set the state in all cases */
2449 arm_state
->vc_resume_state
= new_state
;
2451 /* state specific additional actions */
2452 switch (new_state
) {
2453 case VC_RESUME_FAILED
:
2455 case VC_RESUME_IDLE
:
2456 reinit_completion(&arm_state
->vc_resume_complete
);
2458 case VC_RESUME_REQUESTED
:
2460 case VC_RESUME_IN_PROGRESS
:
2462 case VC_RESUME_RESUMED
:
2463 complete_all(&arm_state
->vc_resume_complete
);
2464 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2473 /* should be called with the write lock held */
2475 start_suspend_timer(VCHIQ_ARM_STATE_T
*arm_state
)
2477 del_timer(&arm_state
->suspend_timer
);
2478 arm_state
->suspend_timer
.expires
= jiffies
+
2479 msecs_to_jiffies(arm_state
->
2480 suspend_timer_timeout
);
2481 add_timer(&arm_state
->suspend_timer
);
2482 arm_state
->suspend_timer_running
= 1;
2485 /* should be called with the write lock held */
2487 stop_suspend_timer(VCHIQ_ARM_STATE_T
*arm_state
)
2489 if (arm_state
->suspend_timer_running
) {
2490 del_timer(&arm_state
->suspend_timer
);
2491 arm_state
->suspend_timer_running
= 0;
2496 need_resume(VCHIQ_STATE_T
*state
)
2498 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2500 return (arm_state
->vc_suspend_state
> VC_SUSPEND_IDLE
) &&
2501 (arm_state
->vc_resume_state
< VC_RESUME_REQUESTED
) &&
2502 vchiq_videocore_wanted(state
);
2506 block_resume(VCHIQ_ARM_STATE_T
*arm_state
)
2508 int status
= VCHIQ_SUCCESS
;
2509 const unsigned long timeout_val
=
2510 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS
);
2511 int resume_count
= 0;
2513 /* Allow any threads which were blocked by the last force suspend to
2514 * complete if they haven't already. Only give this one shot; if
2515 * blocked_count is incremented after blocked_blocker is completed
2516 * (which only happens when blocked_count hits 0) then those threads
2517 * will have to wait until next time around */
2518 if (arm_state
->blocked_count
) {
2519 reinit_completion(&arm_state
->blocked_blocker
);
2520 write_unlock_bh(&arm_state
->susp_res_lock
);
2521 vchiq_log_info(vchiq_susp_log_level
, "%s wait for previously "
2522 "blocked clients", __func__
);
2523 if (wait_for_completion_interruptible_timeout(
2524 &arm_state
->blocked_blocker
, timeout_val
)
2526 vchiq_log_error(vchiq_susp_log_level
, "%s wait for "
2527 "previously blocked clients failed", __func__
);
2528 status
= VCHIQ_ERROR
;
2529 write_lock_bh(&arm_state
->susp_res_lock
);
2532 vchiq_log_info(vchiq_susp_log_level
, "%s previously blocked "
2533 "clients resumed", __func__
);
2534 write_lock_bh(&arm_state
->susp_res_lock
);
2537 /* We need to wait for resume to complete if it's in process */
2538 while (arm_state
->vc_resume_state
!= VC_RESUME_RESUMED
&&
2539 arm_state
->vc_resume_state
> VC_RESUME_IDLE
) {
2540 if (resume_count
> 1) {
2541 status
= VCHIQ_ERROR
;
2542 vchiq_log_error(vchiq_susp_log_level
, "%s waited too "
2543 "many times for resume", __func__
);
2546 write_unlock_bh(&arm_state
->susp_res_lock
);
2547 vchiq_log_info(vchiq_susp_log_level
, "%s wait for resume",
2549 if (wait_for_completion_interruptible_timeout(
2550 &arm_state
->vc_resume_complete
, timeout_val
)
2552 vchiq_log_error(vchiq_susp_log_level
, "%s wait for "
2553 "resume failed (%s)", __func__
,
2554 resume_state_names
[arm_state
->vc_resume_state
+
2555 VC_RESUME_NUM_OFFSET
]);
2556 status
= VCHIQ_ERROR
;
2557 write_lock_bh(&arm_state
->susp_res_lock
);
2560 vchiq_log_info(vchiq_susp_log_level
, "%s resumed", __func__
);
2561 write_lock_bh(&arm_state
->susp_res_lock
);
2564 reinit_completion(&arm_state
->resume_blocker
);
2565 arm_state
->resume_blocked
= 1;
2572 unblock_resume(VCHIQ_ARM_STATE_T
*arm_state
)
2574 complete_all(&arm_state
->resume_blocker
);
2575 arm_state
->resume_blocked
= 0;
2578 /* Initiate suspend via slot handler. Should be called with the write lock
2581 vchiq_arm_vcsuspend(VCHIQ_STATE_T
*state
)
2583 VCHIQ_STATUS_T status
= VCHIQ_ERROR
;
2584 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2589 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2590 status
= VCHIQ_SUCCESS
;
2593 switch (arm_state
->vc_suspend_state
) {
2594 case VC_SUSPEND_REQUESTED
:
2595 vchiq_log_info(vchiq_susp_log_level
, "%s: suspend already "
2596 "requested", __func__
);
2598 case VC_SUSPEND_IN_PROGRESS
:
2599 vchiq_log_info(vchiq_susp_log_level
, "%s: suspend already in "
2600 "progress", __func__
);
2604 /* We don't expect to be in other states, so log but continue
2606 vchiq_log_error(vchiq_susp_log_level
,
2607 "%s unexpected suspend state %s", __func__
,
2608 suspend_state_names
[arm_state
->vc_suspend_state
+
2609 VC_SUSPEND_NUM_OFFSET
]);
2611 case VC_SUSPEND_REJECTED
:
2612 case VC_SUSPEND_FAILED
:
2613 /* Ensure any idle state actions have been run */
2614 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2616 case VC_SUSPEND_IDLE
:
2617 vchiq_log_info(vchiq_susp_log_level
,
2618 "%s: suspending", __func__
);
2619 set_suspend_state(arm_state
, VC_SUSPEND_REQUESTED
);
2620 /* kick the slot handler thread to initiate suspend */
2621 request_poll(state
, NULL
, 0);
2626 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, status
);
2631 vchiq_platform_check_suspend(VCHIQ_STATE_T
*state
)
2633 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2639 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2641 write_lock_bh(&arm_state
->susp_res_lock
);
2642 if (arm_state
->vc_suspend_state
== VC_SUSPEND_REQUESTED
&&
2643 arm_state
->vc_resume_state
== VC_RESUME_RESUMED
) {
2644 set_suspend_state(arm_state
, VC_SUSPEND_IN_PROGRESS
);
2647 write_unlock_bh(&arm_state
->susp_res_lock
);
2650 vchiq_platform_suspend(state
);
2653 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2659 output_timeout_error(VCHIQ_STATE_T
*state
)
2661 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2663 int vc_use_count
= arm_state
->videocore_use_count
;
2664 int active_services
= state
->unused_service
;
2667 if (!arm_state
->videocore_use_count
) {
2668 snprintf(err
, sizeof(err
), " Videocore usecount is 0");
2671 for (i
= 0; i
< active_services
; i
++) {
2672 VCHIQ_SERVICE_T
*service_ptr
= state
->services
[i
];
2674 if (service_ptr
&& service_ptr
->service_use_count
&&
2675 (service_ptr
->srvstate
!= VCHIQ_SRVSTATE_FREE
)) {
2676 snprintf(err
, sizeof(err
), " %c%c%c%c(%d) service has "
2677 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2678 service_ptr
->base
.fourcc
),
2679 service_ptr
->client_id
,
2680 service_ptr
->service_use_count
,
2681 service_ptr
->service_use_count
==
2682 vc_use_count
? "" : " (+ more)");
2688 vchiq_log_error(vchiq_susp_log_level
,
2689 "timed out waiting for vc suspend (%d).%s",
2690 arm_state
->autosuspend_override
, err
);
2694 /* Try to get videocore into suspended state, regardless of autosuspend state.
2695 ** We don't actually force suspend, since videocore may get into a bad state
2696 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2697 ** determine a good point to suspend. If this doesn't happen within 100ms we
2700 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2701 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2704 vchiq_arm_force_suspend(VCHIQ_STATE_T
*state
)
2706 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2707 VCHIQ_STATUS_T status
= VCHIQ_ERROR
;
2714 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2716 write_lock_bh(&arm_state
->susp_res_lock
);
2718 status
= block_resume(arm_state
);
2719 if (status
!= VCHIQ_SUCCESS
)
2721 if (arm_state
->vc_suspend_state
== VC_SUSPEND_SUSPENDED
) {
2722 /* Already suspended - just block resume and exit */
2723 vchiq_log_info(vchiq_susp_log_level
, "%s already suspended",
2725 status
= VCHIQ_SUCCESS
;
2727 } else if (arm_state
->vc_suspend_state
<= VC_SUSPEND_IDLE
) {
2728 /* initiate suspend immediately in the case that we're waiting
2729 * for the timeout */
2730 stop_suspend_timer(arm_state
);
2731 if (!vchiq_videocore_wanted(state
)) {
2732 vchiq_log_info(vchiq_susp_log_level
, "%s videocore "
2733 "idle, initiating suspend", __func__
);
2734 status
= vchiq_arm_vcsuspend(state
);
2735 } else if (arm_state
->autosuspend_override
<
2736 FORCE_SUSPEND_FAIL_MAX
) {
2737 vchiq_log_info(vchiq_susp_log_level
, "%s letting "
2738 "videocore go idle", __func__
);
2739 status
= VCHIQ_SUCCESS
;
2741 vchiq_log_warning(vchiq_susp_log_level
, "%s failed too "
2742 "many times - attempting suspend", __func__
);
2743 status
= vchiq_arm_vcsuspend(state
);
2746 vchiq_log_info(vchiq_susp_log_level
, "%s videocore suspend "
2747 "in progress - wait for completion", __func__
);
2748 status
= VCHIQ_SUCCESS
;
2751 /* Wait for suspend to happen due to system idle (not forced..) */
2752 if (status
!= VCHIQ_SUCCESS
)
2753 goto unblock_resume
;
2756 write_unlock_bh(&arm_state
->susp_res_lock
);
2758 rc
= wait_for_completion_interruptible_timeout(
2759 &arm_state
->vc_suspend_complete
,
2760 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS
));
2762 write_lock_bh(&arm_state
->susp_res_lock
);
2764 vchiq_log_warning(vchiq_susp_log_level
, "%s "
2765 "interrupted waiting for suspend", __func__
);
2766 status
= VCHIQ_ERROR
;
2767 goto unblock_resume
;
2768 } else if (rc
== 0) {
2769 if (arm_state
->vc_suspend_state
> VC_SUSPEND_IDLE
) {
2770 /* Repeat timeout once if in progress */
2776 arm_state
->autosuspend_override
++;
2777 output_timeout_error(state
);
2779 status
= VCHIQ_RETRY
;
2780 goto unblock_resume
;
2782 } while (0 < (repeat
--));
2784 /* Check and report state in case we need to abort ARM suspend */
2785 if (arm_state
->vc_suspend_state
!= VC_SUSPEND_SUSPENDED
) {
2786 status
= VCHIQ_RETRY
;
2787 vchiq_log_error(vchiq_susp_log_level
,
2788 "%s videocore suspend failed (state %s)", __func__
,
2789 suspend_state_names
[arm_state
->vc_suspend_state
+
2790 VC_SUSPEND_NUM_OFFSET
]);
2791 /* Reset the state only if it's still in an error state.
2792 * Something could have already initiated another suspend. */
2793 if (arm_state
->vc_suspend_state
< VC_SUSPEND_IDLE
)
2794 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2796 goto unblock_resume
;
2799 /* successfully suspended - unlock and exit */
2803 /* all error states need to unblock resume before exit */
2804 unblock_resume(arm_state
);
2807 write_unlock_bh(&arm_state
->susp_res_lock
);
2810 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, status
);
2815 vchiq_check_suspend(VCHIQ_STATE_T
*state
)
2817 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2822 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2824 write_lock_bh(&arm_state
->susp_res_lock
);
2825 if (arm_state
->vc_suspend_state
!= VC_SUSPEND_SUSPENDED
&&
2826 arm_state
->first_connect
&&
2827 !vchiq_videocore_wanted(state
)) {
2828 vchiq_arm_vcsuspend(state
);
2830 write_unlock_bh(&arm_state
->susp_res_lock
);
2833 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2839 vchiq_arm_allow_resume(VCHIQ_STATE_T
*state
)
2841 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2848 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2850 write_lock_bh(&arm_state
->susp_res_lock
);
2851 unblock_resume(arm_state
);
2852 resume
= vchiq_check_resume(state
);
2853 write_unlock_bh(&arm_state
->susp_res_lock
);
2856 if (wait_for_completion_interruptible(
2857 &arm_state
->vc_resume_complete
) < 0) {
2858 vchiq_log_error(vchiq_susp_log_level
,
2859 "%s interrupted", __func__
);
2860 /* failed, cannot accurately derive suspend
2861 * state, so exit early. */
2866 read_lock_bh(&arm_state
->susp_res_lock
);
2867 if (arm_state
->vc_suspend_state
== VC_SUSPEND_SUSPENDED
) {
2868 vchiq_log_info(vchiq_susp_log_level
,
2869 "%s: Videocore remains suspended", __func__
);
2871 vchiq_log_info(vchiq_susp_log_level
,
2872 "%s: Videocore resumed", __func__
);
2875 read_unlock_bh(&arm_state
->susp_res_lock
);
2877 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
2881 /* This function should be called with the write lock held */
2883 vchiq_check_resume(VCHIQ_STATE_T
*state
)
2885 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2891 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2893 if (need_resume(state
)) {
2894 set_resume_state(arm_state
, VC_RESUME_REQUESTED
);
2895 request_poll(state
, NULL
, 0);
2900 vchiq_log_trace(vchiq_susp_log_level
, "%s exit", __func__
);
2905 vchiq_use_internal(VCHIQ_STATE_T
*state
, VCHIQ_SERVICE_T
*service
,
2906 enum USE_TYPE_E use_type
)
2908 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
2909 VCHIQ_STATUS_T ret
= VCHIQ_SUCCESS
;
2912 int local_uc
, local_entity_uc
;
2917 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
2919 if (use_type
== USE_TYPE_VCHIQ
) {
2920 sprintf(entity
, "VCHIQ: ");
2921 entity_uc
= &arm_state
->peer_use_count
;
2922 } else if (service
) {
2923 sprintf(entity
, "%c%c%c%c:%03d",
2924 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
2925 service
->client_id
);
2926 entity_uc
= &service
->service_use_count
;
2928 vchiq_log_error(vchiq_susp_log_level
, "%s null service "
2934 write_lock_bh(&arm_state
->susp_res_lock
);
2935 while (arm_state
->resume_blocked
) {
2936 /* If we call 'use' while force suspend is waiting for suspend,
2937 * then we're about to block the thread which the force is
2938 * waiting to complete, so we're bound to just time out. In this
2939 * case, set the suspend state such that the wait will be
2940 * canceled, so we can complete as quickly as possible. */
2941 if (arm_state
->resume_blocked
&& arm_state
->vc_suspend_state
==
2943 set_suspend_state(arm_state
, VC_SUSPEND_FORCE_CANCELED
);
2946 /* If suspend is already in progress then we need to block */
2947 if (!try_wait_for_completion(&arm_state
->resume_blocker
)) {
2948 /* Indicate that there are threads waiting on the resume
2949 * blocker. These need to be allowed to complete before
2950 * a _second_ call to force suspend can complete,
2951 * otherwise low priority threads might never actually
2953 arm_state
->blocked_count
++;
2954 write_unlock_bh(&arm_state
->susp_res_lock
);
2955 vchiq_log_info(vchiq_susp_log_level
, "%s %s resume "
2956 "blocked - waiting...", __func__
, entity
);
2957 if (wait_for_completion_killable(
2958 &arm_state
->resume_blocker
) != 0) {
2959 vchiq_log_error(vchiq_susp_log_level
, "%s %s "
2960 "wait for resume blocker interrupted",
2963 write_lock_bh(&arm_state
->susp_res_lock
);
2964 arm_state
->blocked_count
--;
2965 write_unlock_bh(&arm_state
->susp_res_lock
);
2968 vchiq_log_info(vchiq_susp_log_level
, "%s %s resume "
2969 "unblocked", __func__
, entity
);
2970 write_lock_bh(&arm_state
->susp_res_lock
);
2971 if (--arm_state
->blocked_count
== 0)
2972 complete_all(&arm_state
->blocked_blocker
);
2976 stop_suspend_timer(arm_state
);
2978 local_uc
= ++arm_state
->videocore_use_count
;
2979 local_entity_uc
= ++(*entity_uc
);
2981 /* If there's a pending request which hasn't yet been serviced then
2982 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2983 * vc_resume_complete will block until we either resume or fail to
2985 if (arm_state
->vc_suspend_state
<= VC_SUSPEND_REQUESTED
)
2986 set_suspend_state(arm_state
, VC_SUSPEND_IDLE
);
2988 if ((use_type
!= USE_TYPE_SERVICE_NO_RESUME
) && need_resume(state
)) {
2989 set_resume_state(arm_state
, VC_RESUME_REQUESTED
);
2990 vchiq_log_info(vchiq_susp_log_level
,
2991 "%s %s count %d, state count %d",
2992 __func__
, entity
, local_entity_uc
, local_uc
);
2993 request_poll(state
, NULL
, 0);
2995 vchiq_log_trace(vchiq_susp_log_level
,
2996 "%s %s count %d, state count %d",
2997 __func__
, entity
, *entity_uc
, local_uc
);
3000 write_unlock_bh(&arm_state
->susp_res_lock
);
3002 /* Completion is in a done state when we're not suspended, so this won't
3003 * block for the non-suspended case. */
3004 if (!try_wait_for_completion(&arm_state
->vc_resume_complete
)) {
3005 vchiq_log_info(vchiq_susp_log_level
, "%s %s wait for resume",
3007 if (wait_for_completion_killable(
3008 &arm_state
->vc_resume_complete
) != 0) {
3009 vchiq_log_error(vchiq_susp_log_level
, "%s %s wait for "
3010 "resume interrupted", __func__
, entity
);
3014 vchiq_log_info(vchiq_susp_log_level
, "%s %s resumed", __func__
,
3018 if (ret
== VCHIQ_SUCCESS
) {
3019 VCHIQ_STATUS_T status
= VCHIQ_SUCCESS
;
3020 long ack_cnt
= atomic_xchg(&arm_state
->ka_use_ack_count
, 0);
3022 while (ack_cnt
&& (status
== VCHIQ_SUCCESS
)) {
3023 /* Send the use notify to videocore */
3024 status
= vchiq_send_remote_use_active(state
);
3025 if (status
== VCHIQ_SUCCESS
)
3029 &arm_state
->ka_use_ack_count
);
3034 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
3039 vchiq_release_internal(VCHIQ_STATE_T
*state
, VCHIQ_SERVICE_T
*service
)
3041 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3042 VCHIQ_STATUS_T ret
= VCHIQ_SUCCESS
;
3045 int local_uc
, local_entity_uc
;
3050 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
3053 sprintf(entity
, "%c%c%c%c:%03d",
3054 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
3055 service
->client_id
);
3056 entity_uc
= &service
->service_use_count
;
3058 sprintf(entity
, "PEER: ");
3059 entity_uc
= &arm_state
->peer_use_count
;
3062 write_lock_bh(&arm_state
->susp_res_lock
);
3063 if (!arm_state
->videocore_use_count
|| !(*entity_uc
)) {
3064 /* Don't use BUG_ON - don't allow user thread to crash kernel */
3065 WARN_ON(!arm_state
->videocore_use_count
);
3066 WARN_ON(!(*entity_uc
));
3070 local_uc
= --arm_state
->videocore_use_count
;
3071 local_entity_uc
= --(*entity_uc
);
3073 if (!vchiq_videocore_wanted(state
)) {
3074 if (vchiq_platform_use_suspend_timer() &&
3075 !arm_state
->resume_blocked
) {
3076 /* Only use the timer if we're not trying to force
3077 * suspend (=> resume_blocked) */
3078 start_suspend_timer(arm_state
);
3080 vchiq_log_info(vchiq_susp_log_level
,
3081 "%s %s count %d, state count %d - suspending",
3082 __func__
, entity
, *entity_uc
,
3083 arm_state
->videocore_use_count
);
3084 vchiq_arm_vcsuspend(state
);
3087 vchiq_log_trace(vchiq_susp_log_level
,
3088 "%s %s count %d, state count %d",
3089 __func__
, entity
, *entity_uc
,
3090 arm_state
->videocore_use_count
);
3093 write_unlock_bh(&arm_state
->susp_res_lock
);
3096 vchiq_log_trace(vchiq_susp_log_level
, "%s exit %d", __func__
, ret
);
3101 vchiq_on_remote_use(VCHIQ_STATE_T
*state
)
3103 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3105 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
3106 atomic_inc(&arm_state
->ka_use_count
);
3107 complete(&arm_state
->ka_evt
);
3111 vchiq_on_remote_release(VCHIQ_STATE_T
*state
)
3113 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3115 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
3116 atomic_inc(&arm_state
->ka_release_count
);
3117 complete(&arm_state
->ka_evt
);
3121 vchiq_use_service_internal(VCHIQ_SERVICE_T
*service
)
3123 return vchiq_use_internal(service
->state
, service
, USE_TYPE_SERVICE
);
3127 vchiq_release_service_internal(VCHIQ_SERVICE_T
*service
)
3129 return vchiq_release_internal(service
->state
, service
);
3132 VCHIQ_DEBUGFS_NODE_T
*
3133 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance
)
3135 return &instance
->debugfs_node
;
3139 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance
)
3141 VCHIQ_SERVICE_T
*service
;
3142 int use_count
= 0, i
;
3145 while ((service
= next_service_by_instance(instance
->state
,
3146 instance
, &i
)) != NULL
) {
3147 use_count
+= service
->service_use_count
;
3148 unlock_service(service
);
3154 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance
)
3156 return instance
->pid
;
3160 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance
)
3162 return instance
->trace
;
3166 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance
, int trace
)
3168 VCHIQ_SERVICE_T
*service
;
3172 while ((service
= next_service_by_instance(instance
->state
,
3173 instance
, &i
)) != NULL
) {
3174 service
->trace
= trace
;
3175 unlock_service(service
);
3177 instance
->trace
= (trace
!= 0);
3180 static void suspend_timer_callback(unsigned long context
)
3182 VCHIQ_STATE_T
*state
= (VCHIQ_STATE_T
*)context
;
3183 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3187 vchiq_log_info(vchiq_susp_log_level
,
3188 "%s - suspend timer expired - check suspend", __func__
);
3189 vchiq_check_suspend(state
);
3195 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle
)
3197 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
3198 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
3201 ret
= vchiq_use_internal(service
->state
, service
,
3202 USE_TYPE_SERVICE_NO_RESUME
);
3203 unlock_service(service
);
3209 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle
)
3211 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
3212 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
3215 ret
= vchiq_use_internal(service
->state
, service
,
3217 unlock_service(service
);
3223 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle
)
3225 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
3226 VCHIQ_SERVICE_T
*service
= find_service_by_handle(handle
);
3229 ret
= vchiq_release_internal(service
->state
, service
);
3230 unlock_service(service
);
3236 vchiq_dump_service_use_state(VCHIQ_STATE_T
*state
)
3238 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3240 /* Only dump 64 services */
3241 static const int local_max_services
= 64;
3242 /* If there's more than 64 services, only dump ones with
3243 * non-zero counts */
3244 int only_nonzero
= 0;
3245 static const char *nz
= "<-- preventing suspend";
3247 enum vc_suspend_status vc_suspend_state
;
3248 enum vc_resume_status vc_resume_state
;
3251 int active_services
;
3252 struct service_data_struct
{
3256 } service_data
[local_max_services
];
3261 read_lock_bh(&arm_state
->susp_res_lock
);
3262 vc_suspend_state
= arm_state
->vc_suspend_state
;
3263 vc_resume_state
= arm_state
->vc_resume_state
;
3264 peer_count
= arm_state
->peer_use_count
;
3265 vc_use_count
= arm_state
->videocore_use_count
;
3266 active_services
= state
->unused_service
;
3267 if (active_services
> local_max_services
)
3270 for (i
= 0; (i
< active_services
) && (j
< local_max_services
); i
++) {
3271 VCHIQ_SERVICE_T
*service_ptr
= state
->services
[i
];
3276 if (only_nonzero
&& !service_ptr
->service_use_count
)
3279 if (service_ptr
->srvstate
== VCHIQ_SRVSTATE_FREE
)
3282 service_data
[j
].fourcc
= service_ptr
->base
.fourcc
;
3283 service_data
[j
].clientid
= service_ptr
->client_id
;
3284 service_data
[j
++].use_count
= service_ptr
->service_use_count
;
3287 read_unlock_bh(&arm_state
->susp_res_lock
);
3289 vchiq_log_warning(vchiq_susp_log_level
,
3290 "-- Videcore suspend state: %s --",
3291 suspend_state_names
[vc_suspend_state
+ VC_SUSPEND_NUM_OFFSET
]);
3292 vchiq_log_warning(vchiq_susp_log_level
,
3293 "-- Videcore resume state: %s --",
3294 resume_state_names
[vc_resume_state
+ VC_RESUME_NUM_OFFSET
]);
3297 vchiq_log_warning(vchiq_susp_log_level
, "Too many active "
3298 "services (%d). Only dumping up to first %d services "
3299 "with non-zero use-count", active_services
,
3300 local_max_services
);
3302 for (i
= 0; i
< j
; i
++) {
3303 vchiq_log_warning(vchiq_susp_log_level
,
3304 "----- %c%c%c%c:%d service count %d %s",
3305 VCHIQ_FOURCC_AS_4CHARS(service_data
[i
].fourcc
),
3306 service_data
[i
].clientid
,
3307 service_data
[i
].use_count
,
3308 service_data
[i
].use_count
? nz
: "");
3310 vchiq_log_warning(vchiq_susp_log_level
,
3311 "----- VCHIQ use count count %d", peer_count
);
3312 vchiq_log_warning(vchiq_susp_log_level
,
3313 "--- Overall vchiq instance use count %d", vc_use_count
);
3315 vchiq_dump_platform_use_state(state
);
3319 vchiq_check_service(VCHIQ_SERVICE_T
*service
)
3321 VCHIQ_ARM_STATE_T
*arm_state
;
3322 VCHIQ_STATUS_T ret
= VCHIQ_ERROR
;
3324 if (!service
|| !service
->state
)
3327 vchiq_log_trace(vchiq_susp_log_level
, "%s", __func__
);
3329 arm_state
= vchiq_platform_get_arm_state(service
->state
);
3331 read_lock_bh(&arm_state
->susp_res_lock
);
3332 if (service
->service_use_count
)
3333 ret
= VCHIQ_SUCCESS
;
3334 read_unlock_bh(&arm_state
->susp_res_lock
);
3336 if (ret
== VCHIQ_ERROR
) {
3337 vchiq_log_error(vchiq_susp_log_level
,
3338 "%s ERROR - %c%c%c%c:%d service count %d, "
3339 "state count %d, videocore suspend state %s", __func__
,
3340 VCHIQ_FOURCC_AS_4CHARS(service
->base
.fourcc
),
3341 service
->client_id
, service
->service_use_count
,
3342 arm_state
->videocore_use_count
,
3343 suspend_state_names
[arm_state
->vc_suspend_state
+
3344 VC_SUSPEND_NUM_OFFSET
]);
3345 vchiq_dump_service_use_state(service
->state
);
3351 /* stub functions */
3352 void vchiq_on_remote_use_active(VCHIQ_STATE_T
*state
)
3357 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T
*state
,
3358 VCHIQ_CONNSTATE_T oldstate
, VCHIQ_CONNSTATE_T newstate
)
3360 VCHIQ_ARM_STATE_T
*arm_state
= vchiq_platform_get_arm_state(state
);
3362 vchiq_log_info(vchiq_susp_log_level
, "%d: %s->%s", state
->id
,
3363 get_conn_state_name(oldstate
), get_conn_state_name(newstate
));
3364 if (state
->conn_state
== VCHIQ_CONNSTATE_CONNECTED
) {
3365 write_lock_bh(&arm_state
->susp_res_lock
);
3366 if (!arm_state
->first_connect
) {
3367 char threadname
[16];
3369 arm_state
->first_connect
= 1;
3370 write_unlock_bh(&arm_state
->susp_res_lock
);
3371 snprintf(threadname
, sizeof(threadname
), "vchiq-keep/%d",
3373 arm_state
->ka_thread
= kthread_create(
3374 &vchiq_keepalive_thread_func
,
3377 if (IS_ERR(arm_state
->ka_thread
)) {
3378 vchiq_log_error(vchiq_susp_log_level
,
3379 "vchiq: FATAL: couldn't create thread %s",
3382 wake_up_process(arm_state
->ka_thread
);
3385 write_unlock_bh(&arm_state
->susp_res_lock
);
3389 static int vchiq_probe(struct platform_device
*pdev
)
3391 struct device_node
*fw_node
;
3392 struct rpi_firmware
*fw
;
3396 fw_node
= of_parse_phandle(pdev
->dev
.of_node
, "firmware", 0);
3398 dev_err(&pdev
->dev
, "Missing firmware node\n");
3402 fw
= rpi_firmware_get(fw_node
);
3403 of_node_put(fw_node
);
3405 return -EPROBE_DEFER
;
3407 platform_set_drvdata(pdev
, fw
);
3409 err
= vchiq_platform_init(pdev
, &g_state
);
3411 goto failed_platform_init
;
3413 err
= alloc_chrdev_region(&vchiq_devid
, VCHIQ_MINOR
, 1, DEVICE_NAME
);
3415 vchiq_log_error(vchiq_arm_log_level
,
3416 "Unable to allocate device number");
3417 goto failed_platform_init
;
3419 cdev_init(&vchiq_cdev
, &vchiq_fops
);
3420 vchiq_cdev
.owner
= THIS_MODULE
;
3421 err
= cdev_add(&vchiq_cdev
, vchiq_devid
, 1);
3423 vchiq_log_error(vchiq_arm_log_level
,
3424 "Unable to register device");
3425 goto failed_cdev_add
;
3428 /* create sysfs entries */
3429 vchiq_class
= class_create(THIS_MODULE
, DEVICE_NAME
);
3430 ptr_err
= vchiq_class
;
3431 if (IS_ERR(ptr_err
))
3432 goto failed_class_create
;
3434 vchiq_dev
= device_create(vchiq_class
, NULL
,
3435 vchiq_devid
, NULL
, "vchiq");
3436 ptr_err
= vchiq_dev
;
3437 if (IS_ERR(ptr_err
))
3438 goto failed_device_create
;
3440 /* create debugfs entries */
3441 err
= vchiq_debugfs_init();
3443 goto failed_debugfs_init
;
3445 vchiq_log_info(vchiq_arm_log_level
,
3446 "vchiq: initialised - version %d (min %d), device %d.%d",
3447 VCHIQ_VERSION
, VCHIQ_VERSION_MIN
,
3448 MAJOR(vchiq_devid
), MINOR(vchiq_devid
));
3452 failed_debugfs_init
:
3453 device_destroy(vchiq_class
, vchiq_devid
);
3454 failed_device_create
:
3455 class_destroy(vchiq_class
);
3456 failed_class_create
:
3457 cdev_del(&vchiq_cdev
);
3458 err
= PTR_ERR(ptr_err
);
3460 unregister_chrdev_region(vchiq_devid
, 1);
3461 failed_platform_init
:
3462 vchiq_log_warning(vchiq_arm_log_level
, "could not load vchiq");
3466 static int vchiq_remove(struct platform_device
*pdev
)
3468 vchiq_debugfs_deinit();
3469 device_destroy(vchiq_class
, vchiq_devid
);
3470 class_destroy(vchiq_class
);
3471 cdev_del(&vchiq_cdev
);
3472 unregister_chrdev_region(vchiq_devid
, 1);
3477 static const struct of_device_id vchiq_of_match
[] = {
3478 { .compatible
= "brcm,bcm2835-vchiq", },
3481 MODULE_DEVICE_TABLE(of
, vchiq_of_match
);
3483 static struct platform_driver vchiq_driver
= {
3485 .name
= "bcm2835_vchiq",
3486 .of_match_table
= vchiq_of_match
,
3488 .probe
= vchiq_probe
,
3489 .remove
= vchiq_remove
,
3491 module_platform_driver(vchiq_driver
);
3493 MODULE_LICENSE("Dual BSD/GPL");
3494 MODULE_DESCRIPTION("Videocore VCHIQ driver");
3495 MODULE_AUTHOR("Broadcom Corporation");