]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
Merge 4.9-rc5 into staging-next
[mirror_ubuntu-zesty-kernel.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_arm.c
CommitLineData
71bad7f0 1/**
2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/errno.h>
39#include <linux/cdev.h>
40#include <linux/fs.h>
41#include <linux/device.h>
42#include <linux/mm.h>
43#include <linux/highmem.h>
44#include <linux/pagemap.h>
45#include <linux/bug.h>
46#include <linux/semaphore.h>
47#include <linux/list.h>
48#include <linux/of.h>
49#include <linux/platform_device.h>
50#include <soc/bcm2835/raspberrypi-firmware.h>
51
52#include "vchiq_core.h"
53#include "vchiq_ioctl.h"
54#include "vchiq_arm.h"
55#include "vchiq_debugfs.h"
56#include "vchiq_killable.h"
57
58#define DEVICE_NAME "vchiq"
59
60/* Override the default prefix, which would be vchiq_arm (from the filename) */
61#undef MODULE_PARAM_PREFIX
62#define MODULE_PARAM_PREFIX DEVICE_NAME "."
63
64#define VCHIQ_MINOR 0
65
66/* Some per-instance constants */
67#define MAX_COMPLETIONS 16
68#define MAX_SERVICES 64
69#define MAX_ELEMENTS 8
70#define MSG_QUEUE_SIZE 64
71
72#define KEEPALIVE_VER 1
73#define KEEPALIVE_VER_MIN KEEPALIVE_VER
74
75/* Run time control of log level, based on KERN_XXX level. */
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

/* Polling intervals (ms) used by the suspend timer. */
#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

/* State-name tables; presumably indexed by the VC_SUSPEND_*/VC_RESUME_*
 * enum value plus the matching *_NUM_OFFSET (negative states come before
 * idle) — confirm against the enum declarations in vchiq_arm.h. */
#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend. This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200


static void suspend_timer_callback(unsigned long context);
112
113
/* Per-service state for a service created through the /dev/vchiq character
 * device. Stored as the base.userdata of the underlying VCHIQ_SERVICE_T. */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* underlying core service */
	void *userdata;			/* opaque value supplied by the client */
	VCHIQ_INSTANCE_T instance;	/* owning /dev/vchiq instance */
	char is_vchi;			/* non-zero: messages buffered in msg_queue */
	char dequeue_pending;		/* a DEQUEUE_MESSAGE ioctl is waiting */
	char close_pending;		/* CLOSED queued but not yet acknowledged
					   via CLOSE_DELIVERED */
	int message_available_pos;	/* completion_insert position of the last
					   MESSAGE_AVAILABLE completion queued */
	int msg_insert;			/* msg_queue write index (free-running) */
	int msg_remove;			/* msg_queue read index (free-running) */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	struct semaphore close_event;	/* signalled by the CLOSE_DELIVERED ioctl */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];	/* pending message ring */
} USER_SERVICE_T;
129
/* Waiter for a blocking bulk transfer; kept on the instance's
 * bulk_waiter_list so a VCHIQ_BULK_MODE_WAITING request can be reclaimed
 * later by the same pid. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid that started the transfer */
	struct list_head list;		/* link on instance->bulk_waiter_list */
};
135
/* Per-open-file state for /dev/vchiq. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;		/* global VCHIQ state (g_state) */
	/* Circular completion queue; indices are free-running and are
	   masked with (MAX_COMPLETIONS - 1) on access. */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;	/* signalled on completion insert */
	struct semaphore remove_event;	/* signalled on completion removal */
	struct mutex completion_mutex;

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* instance is shutting down */
	int pid;
	int mark;
	int use_close_delivered;	/* client acknowledges CLOSED via
					   CLOSE_DELIVERED (LIB_VERSION >=
					   VCHIQ_VERSION_CLOSE_DELIVERED) */
	int trace;

	/* Parked waiters for VCHIQ_BULK_MODE_WAITING transfers, keyed by pid. */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};
157
/* Cursor for dumping driver state into a user-space buffer. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* NOTE(review): presumably bytes produced so
				   far — confirm against the dump routines */
	size_t space;		/* remaining space in buf */
	loff_t offset;
} DUMP_CONTEXT_T;
164
/* Character-device plumbing and global driver state. */
static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static VCHIQ_STATE_T g_state;
static struct class *vchiq_class;
static struct device *vchiq_dev;
/* Protects every USER_SERVICE_T's msg_queue and its insert/remove indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);

/* Ioctl names for trace logging, indexed by _IOC_NR(cmd); must stay in
 * step with the VCHIQ_IOC_* numbering (checked by the static assert). */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
	(VCHIQ_IOC_MAX + 1));

static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
198
199/****************************************************************************
200*
201* add_completion
202*
203***************************************************************************/
204
/* Append a completion record to the instance's circular completion queue,
 * blocking (interruptibly) while the queue is full.
 *
 * header may be NULL (used to signal MESSAGE_AVAILABLE for vchi services
 * whose payload is delivered through the per-service msg_queue instead).
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if the wait was interrupted by a
 * signal, or VCHIQ_ERROR if the instance is closing.
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	DEBUG_INITIALISE(g_state.local)

	/* Insert/remove indices are free-running; the queue is full when
	   they are exactly MAX_COMPLETIONS apart. */
	while (instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS)) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_ERROR;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking wraps the index. */
	completion =
		&instance->completions[instance->completion_insert &
		(MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos =
			instance->completion_insert;
	instance->completion_insert++;

	/* Wake any thread blocked in AWAIT_COMPLETION. */
	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
263
264/****************************************************************************
265*
266* service_callback
267*
268***************************************************************************/
269
/* Callback registered with the VCHIQ core for every service created via
 * /dev/vchiq. For vchi services the message header is buffered in the
 * per-service msg_queue and (at most one) MESSAGE_AVAILABLE completion is
 * queued; otherwise the header is passed straight to the completion queue.
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if a wait was interrupted by a
 * signal, or VCHIQ_ERROR if the instance is closing.
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Silently drop notifications once the instance is shutting down. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Free-running indices: the ring is full when they are
		   MSG_QUEUE_SIZE apart. The lock is dropped while waiting. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			/* Wait for the client to dequeue a message. */
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;
		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			user_service->dequeue_pending = 0;
			return VCHIQ_SUCCESS;
		}

		/* Message is buffered; signal availability with a completion
		   carrying a NULL header. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
371
372/****************************************************************************
373*
374* user_service_free
375*
376***************************************************************************/
/* Destructor hook handed to vchiq_add_service_internal: releases the
 * USER_SERVICE_T allocated by the CREATE_SERVICE ioctl when the core
 * service is finally freed. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
382
383/****************************************************************************
384*
385* close_delivered
386*
387***************************************************************************/
/* Acknowledge delivery of a SERVICE_CLOSED completion: drops the extra
 * service reference taken in add_completion and wakes any thread blocked
 * in the CLOSE_SERVICE/REMOVE_SERVICE ioctls. */
static void close_delivered(USER_SERVICE_T *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"close_delivered(handle=%x)",
		user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		up(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
404
49bec49f
MZ
/* Cursor state threaded through vchiq_queue_message's element-copy
 * callback (vchiq_ioc_copy_element_data). */
struct vchiq_io_copy_callback_context {
	VCHIQ_ELEMENT_T *current_element;	/* element currently being copied */
	size_t current_element_offset;		/* bytes already consumed from it */
	unsigned long elements_to_go;		/* elements not yet fully copied */
	size_t current_offset;			/* total bytes copied so far */
};
411
412static ssize_t
413vchiq_ioc_copy_element_data(
414 void *context,
415 void *dest,
416 size_t offset,
417 size_t maxsize)
418{
419 long res;
420 size_t bytes_this_round;
421 struct vchiq_io_copy_callback_context *copy_context =
422 (struct vchiq_io_copy_callback_context *)context;
423
424 if (offset != copy_context->current_offset)
425 return 0;
426
427 if (!copy_context->elements_to_go)
428 return 0;
429
430 /*
431 * Complex logic here to handle the case of 0 size elements
432 * in the middle of the array of elements.
433 *
434 * Need to skip over these 0 size elements.
435 */
436 while (1) {
437 bytes_this_round = min(copy_context->current_element->size -
438 copy_context->current_element_offset,
439 maxsize);
440
441 if (bytes_this_round)
442 break;
443
444 copy_context->elements_to_go--;
445 copy_context->current_element++;
446 copy_context->current_element_offset = 0;
447
448 if (!copy_context->elements_to_go)
449 return 0;
450 }
451
452 res = copy_from_user(dest,
453 copy_context->current_element->data +
454 copy_context->current_element_offset,
455 bytes_this_round);
456
457 if (res != 0)
458 return -EFAULT;
459
460 copy_context->current_element_offset += bytes_this_round;
461 copy_context->current_offset += bytes_this_round;
462
463 /*
464 * Check if done with current element, and if so advance to the next.
465 */
466 if (copy_context->current_element_offset ==
467 copy_context->current_element->size) {
468 copy_context->elements_to_go--;
469 copy_context->current_element++;
470 copy_context->current_element_offset = 0;
471 }
472
473 return bytes_this_round;
474}
475
476/**************************************************************************
477 *
478 * vchiq_ioc_queue_message
479 *
480 **************************************************************************/
481static VCHIQ_STATUS_T
482vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
483 VCHIQ_ELEMENT_T *elements,
484 unsigned long count)
485{
486 struct vchiq_io_copy_callback_context context;
487 unsigned long i;
488 size_t total_size = 0;
489
490 context.current_element = elements;
491 context.current_element_offset = 0;
492 context.elements_to_go = count;
493 context.current_offset = 0;
494
495 for (i = 0; i < count; i++) {
496 if (!elements[i].data && elements[i].size != 0)
497 return -EFAULT;
498
499 total_size += elements[i].size;
500 }
501
502 return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
503 &context, total_size);
504}
505
71bad7f0 506/****************************************************************************
507*
508* vchiq_ioctl
509*
510***************************************************************************/
511static long
512vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
513{
514 VCHIQ_INSTANCE_T instance = file->private_data;
515 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
516 VCHIQ_SERVICE_T *service = NULL;
517 long ret = 0;
518 int i, rc;
519 DEBUG_INITIALISE(g_state.local)
520
521 vchiq_log_trace(vchiq_arm_log_level,
df044ebf
GKH
522 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
523 instance,
71bad7f0 524 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
525 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
526 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
527
528 switch (cmd) {
529 case VCHIQ_IOC_SHUTDOWN:
530 if (!instance->connected)
531 break;
532
533 /* Remove all services */
534 i = 0;
535 while ((service = next_service_by_instance(instance->state,
536 instance, &i)) != NULL) {
537 status = vchiq_remove_service(service->handle);
538 unlock_service(service);
539 if (status != VCHIQ_SUCCESS)
540 break;
541 }
542 service = NULL;
543
544 if (status == VCHIQ_SUCCESS) {
545 /* Wake the completion thread and ask it to exit */
546 instance->closing = 1;
547 up(&instance->insert_event);
548 }
549
550 break;
551
552 case VCHIQ_IOC_CONNECT:
553 if (instance->connected) {
554 ret = -EINVAL;
555 break;
556 }
557 rc = mutex_lock_interruptible(&instance->state->mutex);
558 if (rc != 0) {
559 vchiq_log_error(vchiq_arm_log_level,
560 "vchiq: connect: could not lock mutex for "
561 "state %d: %d",
562 instance->state->id, rc);
563 ret = -EINTR;
564 break;
565 }
566 status = vchiq_connect_internal(instance->state, instance);
567 mutex_unlock(&instance->state->mutex);
568
569 if (status == VCHIQ_SUCCESS)
570 instance->connected = 1;
571 else
572 vchiq_log_error(vchiq_arm_log_level,
573 "vchiq: could not connect: %d", status);
574 break;
575
576 case VCHIQ_IOC_CREATE_SERVICE: {
577 VCHIQ_CREATE_SERVICE_T args;
578 USER_SERVICE_T *user_service = NULL;
579 void *userdata;
580 int srvstate;
581
582 if (copy_from_user
583 (&args, (const void __user *)arg,
584 sizeof(args)) != 0) {
585 ret = -EFAULT;
586 break;
587 }
588
589 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
590 if (!user_service) {
591 ret = -ENOMEM;
592 break;
593 }
594
595 if (args.is_open) {
596 if (!instance->connected) {
597 ret = -ENOTCONN;
598 kfree(user_service);
599 break;
600 }
601 srvstate = VCHIQ_SRVSTATE_OPENING;
602 } else {
603 srvstate =
604 instance->connected ?
605 VCHIQ_SRVSTATE_LISTENING :
606 VCHIQ_SRVSTATE_HIDDEN;
607 }
608
609 userdata = args.params.userdata;
610 args.params.callback = service_callback;
611 args.params.userdata = user_service;
612 service = vchiq_add_service_internal(
613 instance->state,
614 &args.params, srvstate,
615 instance, user_service_free);
616
617 if (service != NULL) {
618 user_service->service = service;
619 user_service->userdata = userdata;
620 user_service->instance = instance;
621 user_service->is_vchi = (args.is_vchi != 0);
622 user_service->dequeue_pending = 0;
623 user_service->close_pending = 0;
624 user_service->message_available_pos =
625 instance->completion_remove - 1;
626 user_service->msg_insert = 0;
627 user_service->msg_remove = 0;
628 sema_init(&user_service->insert_event, 0);
629 sema_init(&user_service->remove_event, 0);
630 sema_init(&user_service->close_event, 0);
631
632 if (args.is_open) {
633 status = vchiq_open_service_internal
634 (service, instance->pid);
635 if (status != VCHIQ_SUCCESS) {
636 vchiq_remove_service(service->handle);
637 service = NULL;
638 ret = (status == VCHIQ_RETRY) ?
639 -EINTR : -EIO;
640 break;
641 }
642 }
643
644 if (copy_to_user((void __user *)
645 &(((VCHIQ_CREATE_SERVICE_T __user *)
646 arg)->handle),
647 (const void *)&service->handle,
648 sizeof(service->handle)) != 0) {
649 ret = -EFAULT;
650 vchiq_remove_service(service->handle);
651 }
652
653 service = NULL;
654 } else {
655 ret = -EEXIST;
656 kfree(user_service);
657 }
658 } break;
659
660 case VCHIQ_IOC_CLOSE_SERVICE: {
661 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
662
663 service = find_service_for_instance(instance, handle);
664 if (service != NULL) {
665 USER_SERVICE_T *user_service =
666 (USER_SERVICE_T *)service->base.userdata;
667 /* close_pending is false on first entry, and when the
668 wait in vchiq_close_service has been interrupted. */
669 if (!user_service->close_pending) {
670 status = vchiq_close_service(service->handle);
671 if (status != VCHIQ_SUCCESS)
672 break;
673 }
674
675 /* close_pending is true once the underlying service
676 has been closed until the client library calls the
677 CLOSE_DELIVERED ioctl, signalling close_event. */
678 if (user_service->close_pending &&
679 down_interruptible(&user_service->close_event))
680 status = VCHIQ_RETRY;
681 }
682 else
683 ret = -EINVAL;
684 } break;
685
686 case VCHIQ_IOC_REMOVE_SERVICE: {
687 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
688
689 service = find_service_for_instance(instance, handle);
690 if (service != NULL) {
691 USER_SERVICE_T *user_service =
692 (USER_SERVICE_T *)service->base.userdata;
693 /* close_pending is false on first entry, and when the
694 wait in vchiq_close_service has been interrupted. */
695 if (!user_service->close_pending) {
696 status = vchiq_remove_service(service->handle);
697 if (status != VCHIQ_SUCCESS)
698 break;
699 }
700
701 /* close_pending is true once the underlying service
702 has been closed until the client library calls the
703 CLOSE_DELIVERED ioctl, signalling close_event. */
704 if (user_service->close_pending &&
705 down_interruptible(&user_service->close_event))
706 status = VCHIQ_RETRY;
707 }
708 else
709 ret = -EINVAL;
710 } break;
711
712 case VCHIQ_IOC_USE_SERVICE:
713 case VCHIQ_IOC_RELEASE_SERVICE: {
714 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
715
716 service = find_service_for_instance(instance, handle);
717 if (service != NULL) {
718 status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
719 vchiq_use_service_internal(service) :
720 vchiq_release_service_internal(service);
721 if (status != VCHIQ_SUCCESS) {
722 vchiq_log_error(vchiq_susp_log_level,
723 "%s: cmd %s returned error %d for "
724 "service %c%c%c%c:%03d",
725 __func__,
726 (cmd == VCHIQ_IOC_USE_SERVICE) ?
727 "VCHIQ_IOC_USE_SERVICE" :
728 "VCHIQ_IOC_RELEASE_SERVICE",
729 status,
730 VCHIQ_FOURCC_AS_4CHARS(
731 service->base.fourcc),
732 service->client_id);
733 ret = -EINVAL;
734 }
735 } else
736 ret = -EINVAL;
737 } break;
738
739 case VCHIQ_IOC_QUEUE_MESSAGE: {
740 VCHIQ_QUEUE_MESSAGE_T args;
741 if (copy_from_user
742 (&args, (const void __user *)arg,
743 sizeof(args)) != 0) {
744 ret = -EFAULT;
745 break;
746 }
747
748 service = find_service_for_instance(instance, args.handle);
749
750 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
751 /* Copy elements into kernel space */
752 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
753 if (copy_from_user(elements, args.elements,
754 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
49bec49f 755 status = vchiq_ioc_queue_message
71bad7f0 756 (args.handle,
757 elements, args.count);
758 else
759 ret = -EFAULT;
760 } else {
761 ret = -EINVAL;
762 }
763 } break;
764
765 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
766 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
767 VCHIQ_QUEUE_BULK_TRANSFER_T args;
768 struct bulk_waiter_node *waiter = NULL;
769 VCHIQ_BULK_DIR_T dir =
770 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
771 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
772
773 if (copy_from_user
774 (&args, (const void __user *)arg,
775 sizeof(args)) != 0) {
776 ret = -EFAULT;
777 break;
778 }
779
780 service = find_service_for_instance(instance, args.handle);
781 if (!service) {
782 ret = -EINVAL;
783 break;
784 }
785
786 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
787 waiter = kzalloc(sizeof(struct bulk_waiter_node),
788 GFP_KERNEL);
789 if (!waiter) {
790 ret = -ENOMEM;
791 break;
792 }
793 args.userdata = &waiter->bulk_waiter;
794 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
795 struct list_head *pos;
796 mutex_lock(&instance->bulk_waiter_list_mutex);
797 list_for_each(pos, &instance->bulk_waiter_list) {
798 if (list_entry(pos, struct bulk_waiter_node,
799 list)->pid == current->pid) {
800 waiter = list_entry(pos,
801 struct bulk_waiter_node,
802 list);
803 list_del(pos);
804 break;
805 }
806
807 }
808 mutex_unlock(&instance->bulk_waiter_list_mutex);
809 if (!waiter) {
810 vchiq_log_error(vchiq_arm_log_level,
811 "no bulk_waiter found for pid %d",
812 current->pid);
813 ret = -ESRCH;
814 break;
815 }
816 vchiq_log_info(vchiq_arm_log_level,
df044ebf
GKH
817 "found bulk_waiter %pK for pid %d", waiter,
818 current->pid);
71bad7f0 819 args.userdata = &waiter->bulk_waiter;
820 }
821 status = vchiq_bulk_transfer
822 (args.handle,
823 VCHI_MEM_HANDLE_INVALID,
824 args.data, args.size,
825 args.userdata, args.mode,
826 dir);
827 if (!waiter)
828 break;
829 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
830 !waiter->bulk_waiter.bulk) {
831 if (waiter->bulk_waiter.bulk) {
832 /* Cancel the signal when the transfer
833 ** completes. */
834 spin_lock(&bulk_waiter_spinlock);
835 waiter->bulk_waiter.bulk->userdata = NULL;
836 spin_unlock(&bulk_waiter_spinlock);
837 }
838 kfree(waiter);
839 } else {
840 const VCHIQ_BULK_MODE_T mode_waiting =
841 VCHIQ_BULK_MODE_WAITING;
842 waiter->pid = current->pid;
843 mutex_lock(&instance->bulk_waiter_list_mutex);
844 list_add(&waiter->list, &instance->bulk_waiter_list);
845 mutex_unlock(&instance->bulk_waiter_list_mutex);
846 vchiq_log_info(vchiq_arm_log_level,
df044ebf
GKH
847 "saved bulk_waiter %pK for pid %d",
848 waiter, current->pid);
71bad7f0 849
850 if (copy_to_user((void __user *)
851 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
852 arg)->mode),
853 (const void *)&mode_waiting,
854 sizeof(mode_waiting)) != 0)
855 ret = -EFAULT;
856 }
857 } break;
858
859 case VCHIQ_IOC_AWAIT_COMPLETION: {
860 VCHIQ_AWAIT_COMPLETION_T args;
861
862 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
863 if (!instance->connected) {
864 ret = -ENOTCONN;
865 break;
866 }
867
868 if (copy_from_user(&args, (const void __user *)arg,
869 sizeof(args)) != 0) {
870 ret = -EFAULT;
871 break;
872 }
873
874 mutex_lock(&instance->completion_mutex);
875
876 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
877 while ((instance->completion_remove ==
878 instance->completion_insert)
879 && !instance->closing) {
880 int rc;
881 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
882 mutex_unlock(&instance->completion_mutex);
883 rc = down_interruptible(&instance->insert_event);
884 mutex_lock(&instance->completion_mutex);
885 if (rc != 0) {
886 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
887 vchiq_log_info(vchiq_arm_log_level,
888 "AWAIT_COMPLETION interrupted");
889 ret = -EINTR;
890 break;
891 }
892 }
893 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
894
895 /* A read memory barrier is needed to stop prefetch of a stale
896 ** completion record
897 */
898 rmb();
899
900 if (ret == 0) {
901 int msgbufcount = args.msgbufcount;
902 for (ret = 0; ret < args.count; ret++) {
903 VCHIQ_COMPLETION_DATA_T *completion;
904 VCHIQ_SERVICE_T *service;
905 USER_SERVICE_T *user_service;
906 VCHIQ_HEADER_T *header;
907 if (instance->completion_remove ==
908 instance->completion_insert)
909 break;
910 completion = &instance->completions[
911 instance->completion_remove &
912 (MAX_COMPLETIONS - 1)];
913
914 service = completion->service_userdata;
915 user_service = service->base.userdata;
916 completion->service_userdata =
917 user_service->userdata;
918
919 header = completion->header;
920 if (header) {
921 void __user *msgbuf;
922 int msglen;
923
924 msglen = header->size +
925 sizeof(VCHIQ_HEADER_T);
926 /* This must be a VCHIQ-style service */
927 if (args.msgbufsize < msglen) {
928 vchiq_log_error(
929 vchiq_arm_log_level,
df044ebf
GKH
930 "header %pK: msgbufsize %x < msglen %x",
931 header, args.msgbufsize,
71bad7f0 932 msglen);
933 WARN(1, "invalid message "
934 "size\n");
935 if (ret == 0)
936 ret = -EMSGSIZE;
937 break;
938 }
939 if (msgbufcount <= 0)
940 /* Stall here for lack of a
941 ** buffer for the message. */
942 break;
943 /* Get the pointer from user space */
944 msgbufcount--;
945 if (copy_from_user(&msgbuf,
946 (const void __user *)
947 &args.msgbufs[msgbufcount],
948 sizeof(msgbuf)) != 0) {
949 if (ret == 0)
950 ret = -EFAULT;
951 break;
952 }
953
954 /* Copy the message to user space */
955 if (copy_to_user(msgbuf, header,
956 msglen) != 0) {
957 if (ret == 0)
958 ret = -EFAULT;
959 break;
960 }
961
962 /* Now it has been copied, the message
963 ** can be released. */
964 vchiq_release_message(service->handle,
965 header);
966
967 /* The completion must point to the
968 ** msgbuf. */
969 completion->header = msgbuf;
970 }
971
972 if ((completion->reason ==
973 VCHIQ_SERVICE_CLOSED) &&
974 !instance->use_close_delivered)
975 unlock_service(service);
976
977 if (copy_to_user((void __user *)(
978 (size_t)args.buf +
979 ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
980 completion,
981 sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
982 if (ret == 0)
983 ret = -EFAULT;
984 break;
985 }
986
987 instance->completion_remove++;
988 }
989
990 if (msgbufcount != args.msgbufcount) {
991 if (copy_to_user((void __user *)
992 &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
993 msgbufcount,
994 &msgbufcount,
995 sizeof(msgbufcount)) != 0) {
996 ret = -EFAULT;
997 }
998 }
999 }
1000
1001 if (ret != 0)
1002 up(&instance->remove_event);
1003 mutex_unlock(&instance->completion_mutex);
1004 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1005 } break;
1006
1007 case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1008 VCHIQ_DEQUEUE_MESSAGE_T args;
1009 USER_SERVICE_T *user_service;
1010 VCHIQ_HEADER_T *header;
1011
1012 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1013 if (copy_from_user
1014 (&args, (const void __user *)arg,
1015 sizeof(args)) != 0) {
1016 ret = -EFAULT;
1017 break;
1018 }
1019 service = find_service_for_instance(instance, args.handle);
1020 if (!service) {
1021 ret = -EINVAL;
1022 break;
1023 }
1024 user_service = (USER_SERVICE_T *)service->base.userdata;
1025 if (user_service->is_vchi == 0) {
1026 ret = -EINVAL;
1027 break;
1028 }
1029
1030 spin_lock(&msg_queue_spinlock);
1031 if (user_service->msg_remove == user_service->msg_insert) {
1032 if (!args.blocking) {
1033 spin_unlock(&msg_queue_spinlock);
1034 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1035 ret = -EWOULDBLOCK;
1036 break;
1037 }
1038 user_service->dequeue_pending = 1;
1039 do {
1040 spin_unlock(&msg_queue_spinlock);
1041 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1042 if (down_interruptible(
1043 &user_service->insert_event) != 0) {
1044 vchiq_log_info(vchiq_arm_log_level,
1045 "DEQUEUE_MESSAGE interrupted");
1046 ret = -EINTR;
1047 break;
1048 }
1049 spin_lock(&msg_queue_spinlock);
1050 } while (user_service->msg_remove ==
1051 user_service->msg_insert);
1052
1053 if (ret)
1054 break;
1055 }
1056
1057 BUG_ON((int)(user_service->msg_insert -
1058 user_service->msg_remove) < 0);
1059
1060 header = user_service->msg_queue[user_service->msg_remove &
1061 (MSG_QUEUE_SIZE - 1)];
1062 user_service->msg_remove++;
1063 spin_unlock(&msg_queue_spinlock);
1064
1065 up(&user_service->remove_event);
1066 if (header == NULL)
1067 ret = -ENOTCONN;
1068 else if (header->size <= args.bufsize) {
1069 /* Copy to user space if msgbuf is not NULL */
1070 if ((args.buf == NULL) ||
1071 (copy_to_user((void __user *)args.buf,
1072 header->data,
1073 header->size) == 0)) {
1074 ret = header->size;
1075 vchiq_release_message(
1076 service->handle,
1077 header);
1078 } else
1079 ret = -EFAULT;
1080 } else {
1081 vchiq_log_error(vchiq_arm_log_level,
df044ebf
GKH
1082 "header %pK: bufsize %x < size %x",
1083 header, args.bufsize, header->size);
71bad7f0 1084 WARN(1, "invalid size\n");
1085 ret = -EMSGSIZE;
1086 }
1087 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1088 } break;
1089
1090 case VCHIQ_IOC_GET_CLIENT_ID: {
1091 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1092
1093 ret = vchiq_get_client_id(handle);
1094 } break;
1095
1096 case VCHIQ_IOC_GET_CONFIG: {
1097 VCHIQ_GET_CONFIG_T args;
1098 VCHIQ_CONFIG_T config;
1099
1100 if (copy_from_user(&args, (const void __user *)arg,
1101 sizeof(args)) != 0) {
1102 ret = -EFAULT;
1103 break;
1104 }
1105 if (args.config_size > sizeof(config)) {
1106 ret = -EINVAL;
1107 break;
1108 }
1109 status = vchiq_get_config(instance, args.config_size, &config);
1110 if (status == VCHIQ_SUCCESS) {
1111 if (copy_to_user((void __user *)args.pconfig,
1112 &config, args.config_size) != 0) {
1113 ret = -EFAULT;
1114 break;
1115 }
1116 }
1117 } break;
1118
1119 case VCHIQ_IOC_SET_SERVICE_OPTION: {
1120 VCHIQ_SET_SERVICE_OPTION_T args;
1121
1122 if (copy_from_user(
1123 &args, (const void __user *)arg,
1124 sizeof(args)) != 0) {
1125 ret = -EFAULT;
1126 break;
1127 }
1128
1129 service = find_service_for_instance(instance, args.handle);
1130 if (!service) {
1131 ret = -EINVAL;
1132 break;
1133 }
1134
1135 status = vchiq_set_service_option(
1136 args.handle, args.option, args.value);
1137 } break;
1138
1139 case VCHIQ_IOC_DUMP_PHYS_MEM: {
1140 VCHIQ_DUMP_MEM_T args;
1141
1142 if (copy_from_user
1143 (&args, (const void __user *)arg,
1144 sizeof(args)) != 0) {
1145 ret = -EFAULT;
1146 break;
1147 }
1148 dump_phys_mem(args.virt_addr, args.num_bytes);
1149 } break;
1150
1151 case VCHIQ_IOC_LIB_VERSION: {
1152 unsigned int lib_version = (unsigned int)arg;
1153
1154 if (lib_version < VCHIQ_VERSION_MIN)
1155 ret = -EINVAL;
1156 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1157 instance->use_close_delivered = 1;
1158 } break;
1159
1160 case VCHIQ_IOC_CLOSE_DELIVERED: {
1161 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1162
1163 service = find_closed_service_for_instance(instance, handle);
1164 if (service != NULL) {
1165 USER_SERVICE_T *user_service =
1166 (USER_SERVICE_T *)service->base.userdata;
1167 close_delivered(user_service);
1168 }
1169 else
1170 ret = -EINVAL;
1171 } break;
1172
1173 default:
1174 ret = -ENOTTY;
1175 break;
1176 }
1177
1178 if (service)
1179 unlock_service(service);
1180
1181 if (ret == 0) {
1182 if (status == VCHIQ_ERROR)
1183 ret = -EIO;
1184 else if (status == VCHIQ_RETRY)
1185 ret = -EINTR;
1186 }
1187
1188 if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1189 (ret != -EWOULDBLOCK))
1190 vchiq_log_info(vchiq_arm_log_level,
1191 " ioctl instance %lx, cmd %s -> status %d, %ld",
1192 (unsigned long)instance,
1193 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1194 ioctl_names[_IOC_NR(cmd)] :
1195 "<invalid>",
1196 status, ret);
1197 else
1198 vchiq_log_trace(vchiq_arm_log_level,
1199 " ioctl instance %lx, cmd %s -> status %d, %ld",
1200 (unsigned long)instance,
1201 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1202 ioctl_names[_IOC_NR(cmd)] :
1203 "<invalid>",
1204 status, ret);
1205
1206 return ret;
1207}
1208
1209/****************************************************************************
1210*
1211* vchiq_open
1212*
1213***************************************************************************/
1214
/* Open handler for the vchiq character device. Allocates and initialises
 * a per-open VCHIQ_INSTANCE_T and stashes it in file->private_data for
 * the ioctl/release handlers. Fails with -ENOTCONN if the VideoCore side
 * has not come up. */
static int
vchiq_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
	switch (dev) {
	case VCHIQ_MINOR: {
		int ret;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_INSTANCE_T instance;

		/* vchiq_get_state() returns NULL until the remote end has
		 * marked itself initialised. */
		if (!state) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
			return -ENOTCONN;
		}

		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
		if (!instance)
			return -ENOMEM;

		instance->state = state;
		/* Thread-group id identifies the owning process in logs. */
		instance->pid = current->tgid;

		ret = vchiq_debugfs_add_instance(instance);
		if (ret != 0) {
			kfree(instance);
			return ret;
		}

		/* Completion-queue semaphores start at 0 (queue empty). */
		sema_init(&instance->insert_event, 0);
		sema_init(&instance->remove_event, 0);
		mutex_init(&instance->completion_mutex);
		mutex_init(&instance->bulk_waiter_list_mutex);
		INIT_LIST_HEAD(&instance->bulk_waiter_list);

		file->private_data = instance;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		return -ENXIO;
	}

	return 0;
}
1262
1263/****************************************************************************
1264*
1265* vchiq_release
1266*
1267***************************************************************************/
1268
/* Release (close) handler for the vchiq character device. Tears down
 * everything the instance owns, in order: wake the completion thread,
 * terminate every service opened through this instance and wait for each
 * to reach the FREE state, drain undelivered messages and completions,
 * free leftover bulk waiters, then free the instance itself. */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;
	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
				USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) !=	NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Signalled when the service reaches the FREE state */
			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Return any queued-but-undelivered messages. The
			 * spinlock is dropped around each release call —
			 * NOTE(review): presumably vchiq_release_message may
			 * sleep or take other locks; confirm before
			 * restructuring. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
						(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
			{
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free bulk waiters left over from blocking bulk transfers
		 * whose owning thread never collected the result. */
		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}
1406
1407/****************************************************************************
1408*
1409* vchiq_dump
1410*
1411***************************************************************************/
1412
1413void
1414vchiq_dump(void *dump_context, const char *str, int len)
1415{
1416 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1417
1418 if (context->actual < context->space) {
1419 int copy_bytes;
1420 if (context->offset > 0) {
1421 int skip_bytes = min(len, (int)context->offset);
1422 str += skip_bytes;
1423 len -= skip_bytes;
1424 context->offset -= skip_bytes;
1425 if (context->offset > 0)
1426 return;
1427 }
1428 copy_bytes = min(len, (int)(context->space - context->actual));
1429 if (copy_bytes == 0)
1430 return;
1431 if (copy_to_user(context->buf + context->actual, str,
1432 copy_bytes))
1433 context->actual = -EFAULT;
1434 context->actual += copy_bytes;
1435 len -= copy_bytes;
1436
1437 /* If tne terminating NUL is included in the length, then it
1438 ** marks the end of a line and should be replaced with a
1439 ** carriage return. */
1440 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1441 char cr = '\n';
1442 if (copy_to_user(context->buf + context->actual - 1,
1443 &cr, 1))
1444 context->actual = -EFAULT;
1445 }
1446 }
1447}
1448
1449/****************************************************************************
1450*
1451* vchiq_dump_platform_instance_state
1452*
1453***************************************************************************/
1454
/* Dump a one-line summary of every userspace instance into the dump
 * context. There is no list of instances, so the service table is
 * scanned twice, using instance->mark so each instance is reported
 * exactly once. */
void
vchiq_dump_platform_instances(void *dump_context)
{
	VCHIQ_STATE_T *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/* There is no list of instances, so instead scan all services,
		marking those that have been dumped. */

	/* Pass 1: clear the mark on every instance reachable through a
	 * userspace (service_callback) service. */
	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance)
				instance->mark = 0;
		}
	}

	/* Pass 2: dump each still-unmarked instance, then mark it so
	 * other services of the same instance don't repeat it. */
	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance && !instance->mark) {
				len = snprintf(buf, sizeof(buf),
					"Instance %pK: pid %d,%s completions %d/%d",
					instance, instance->pid,
					instance->connected ? " connected, " :
						"",
					instance->completion_insert -
						instance->completion_remove,
					MAX_COMPLETIONS);

				vchiq_dump(dump_context, buf, len + 1);

				instance->mark = 1;
			}
		}
	}
}
1500
1501/****************************************************************************
1502*
1503* vchiq_dump_platform_service_state
1504*
1505***************************************************************************/
1506
/* Dump a one-line summary for a single service: its owning instance
 * and, for userspace VCHI services, the message-queue occupancy and
 * whether a dequeue is pending. */
void
vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
{
	USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	/* Queue details only apply to userspace VCHI services. */
	if ((service->base.callback == service_callback) &&
		user_service->is_vchi) {
		len += snprintf(buf + len, sizeof(buf) - len,
			", %d/%d messages",
			user_service->msg_insert - user_service->msg_remove,
			MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += snprintf(buf + len, sizeof(buf) - len,
				" (dequeue pending)");
	}

	vchiq_dump(dump_context, buf, len + 1);
}
1530
1531/****************************************************************************
1532*
* dump_phys_mem
1534*
1535***************************************************************************/
1536
1537static void
1538dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1539{
1540 int rc;
1541 uint8_t *end_virt_addr = virt_addr + num_bytes;
1542 int num_pages;
1543 int offset;
1544 int end_offset;
1545 int page_idx;
1546 int prev_idx;
1547 struct page *page;
1548 struct page **pages;
1549 uint8_t *kmapped_virt_ptr;
1550
1551 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1552
1553 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1554 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1555 ~0x0fuL);
1556
1557 offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1558 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1559
1560 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1561
1562 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1563 if (pages == NULL) {
1564 vchiq_log_error(vchiq_arm_log_level,
1565 "Unable to allocation memory for %d pages\n",
1566 num_pages);
1567 return;
1568 }
1569
1570 down_read(&current->mm->mmap_sem);
166beccd 1571 rc = get_user_pages(
71bad7f0 1572 (unsigned long)virt_addr, /* start */
1573 num_pages, /* len */
768ae309 1574 0, /* gup_flags */
71bad7f0 1575 pages, /* pages (array of page pointers) */
1576 NULL); /* vmas */
1577 up_read(&current->mm->mmap_sem);
1578
1579 prev_idx = -1;
1580 page = NULL;
1581
0feb1ed5
SW
1582 if (rc < 0) {
1583 vchiq_log_error(vchiq_arm_log_level,
1584 "Failed to get user pages: %d\n", rc);
1585 goto out;
1586 }
1587
71bad7f0 1588 while (offset < end_offset) {
1589
1590 int page_offset = offset % PAGE_SIZE;
1591 page_idx = offset / PAGE_SIZE;
1592
1593 if (page_idx != prev_idx) {
1594
1595 if (page != NULL)
1596 kunmap(page);
1597 page = pages[page_idx];
1598 kmapped_virt_ptr = kmap(page);
1599
1600 prev_idx = page_idx;
1601 }
1602
1603 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1604 vchiq_log_dump_mem("ph",
1605 (uint32_t)(unsigned long)&kmapped_virt_ptr[
1606 page_offset],
1607 &kmapped_virt_ptr[page_offset], 16);
1608
1609 offset += 16;
1610 }
0feb1ed5
SW
1611
1612out:
71bad7f0 1613 if (page != NULL)
1614 kunmap(page);
1615
1616 for (page_idx = 0; page_idx < num_pages; page_idx++)
232664b3 1617 put_page(pages[page_idx]);
71bad7f0 1618
1619 kfree(pages);
1620}
1621
1622/****************************************************************************
1623*
1624* vchiq_read
1625*
1626***************************************************************************/
1627
1628static ssize_t
1629vchiq_read(struct file *file, char __user *buf,
1630 size_t count, loff_t *ppos)
1631{
1632 DUMP_CONTEXT_T context;
1633 context.buf = buf;
1634 context.actual = 0;
1635 context.space = count;
1636 context.offset = *ppos;
1637
1638 vchiq_dump_state(&context, &g_state);
1639
1640 *ppos += context.actual;
1641
1642 return context.actual;
1643}
1644
1645VCHIQ_STATE_T *
1646vchiq_get_state(void)
1647{
1648
1649 if (g_state.remote == NULL)
1650 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1651 else if (g_state.remote->initialised != 1)
1652 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1653 __func__, g_state.remote->initialised);
1654
1655 return ((g_state.remote != NULL) &&
1656 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1657}
1658
/* File operations for the vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
1667
1668/*
1669 * Autosuspend related functionality
1670 */
1671
1672int
1673vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1674{
1675 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1676 if (!arm_state)
1677 /* autosuspend not supported - always return wanted */
1678 return 1;
1679 else if (arm_state->blocked_count)
1680 return 1;
1681 else if (!arm_state->videocore_use_count)
1682 /* usage count zero - check for override unless we're forcing */
1683 if (arm_state->resume_blocked)
1684 return 0;
1685 else
1686 return vchiq_platform_videocore_wanted(state);
1687 else
1688 /* non-zero usage count - videocore still required */
1689 return 1;
1690}
1691
1692static VCHIQ_STATUS_T
1693vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
1694 VCHIQ_HEADER_T *header,
1695 VCHIQ_SERVICE_HANDLE_T service_user,
1696 void *bulk_user)
1697{
1698 vchiq_log_error(vchiq_susp_log_level,
1699 "%s callback reason %d", __func__, reason);
1700 return 0;
1701}
1702
/* Kernel thread that owns the "KEEP" keep-alive service. It sleeps on
 * arm_state->ka_evt and, when kicked, replays the accumulated use and
 * release counts against the service via vchiq_use_service()/
 * vchiq_release_service(). Runs until interrupted; never exits its
 * main loop normally. */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
				!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1782
1783
1784
/* Initialise the ARM-side autosuspend state: locks, keep-alive event and
 * counters, suspend/resume completions (pre-completed where the idle
 * state is "running"), and the suspend timer. Always returns
 * VCHIQ_SUCCESS; a NULL arm_state is tolerated as a no-op. */
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;

	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		init_completion(&arm_state->vc_suspend_complete);

		init_completion(&arm_state->vc_resume_complete);
		/* Initialise to 'done' state. We only want to block on resume
		 * completion while videocore is suspended. */
		set_resume_state(arm_state, VC_RESUME_RESUMED);

		init_completion(&arm_state->resume_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while resume is blocked */
		complete_all(&arm_state->resume_blocker);

		init_completion(&arm_state->blocked_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while things are waiting on the resume blocker */
		complete_all(&arm_state->blocked_blocker);

		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
		arm_state->suspend_timer_running = 0;
		init_timer(&arm_state->suspend_timer);
		arm_state->suspend_timer.data = (unsigned long)(state);
		arm_state->suspend_timer.function = suspend_timer_callback;

		arm_state->first_connect = 0;

	}
	return status;
}
1826
1827/*
1828** Functions to modify the state variables;
1829** set_suspend_state
1830** set_resume_state
1831**
1832** There are more state variables than we might like, so ensure they remain in
1833** step. Suspend and resume state are maintained separately, since most of
1834** these state machines can operate independently. However, there are a few
1835** states where state transitions in one state machine cause a reset to the
1836** other state machine. In addition, there are some completion events which
1837** need to occur on state machine reset and end-state(s), so these are also
1838** dealt with in these functions.
1839**
1840** In all states we set the state variable according to the input, but in some
1841** cases we perform additional steps outlined below;
1842**
1843** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1844** The suspend completion is completed after any suspend
1845** attempt. When we reset the state machine we also reset
1846** the completion. This reset occurs when videocore is
1847** resumed, and also if we initiate suspend after a suspend
1848** failure.
1849**
1850** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1851** suspend - ie from this point on we must try to suspend
1852** before resuming can occur. We therefore also reset the
1853** resume state machine to VC_RESUME_IDLE in this state.
1854**
1855** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1856** complete_all on the suspend completion to notify
1857** anything waiting for suspend to happen.
1858**
1859** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1860** initiate resume, so no need to alter resume state.
1861** We call complete_all on the suspend completion to notify
1862** of suspend rejection.
1863**
1864** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1865** suspend completion and reset the resume state machine.
1866**
1867** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
** resume completion is in its 'done' state whenever
** videocore is running. Therefore, the VC_RESUME_IDLE state
1870** implies that videocore is suspended.
1871** Hence, any thread which needs to wait until videocore is
1872** running can wait on this completion - it will only block
1873** if videocore is suspended.
1874**
1875** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
1876** Call complete_all on the resume completion to unblock
1877** any threads waiting for resume. Also reset the suspend
** state machine to its idle state.
1879**
1880** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1881*/
1882
1883void
1884set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1885 enum vc_suspend_status new_state)
1886{
1887 /* set the state in all cases */
1888 arm_state->vc_suspend_state = new_state;
1889
1890 /* state specific additional actions */
1891 switch (new_state) {
1892 case VC_SUSPEND_FORCE_CANCELED:
1893 complete_all(&arm_state->vc_suspend_complete);
1894 break;
1895 case VC_SUSPEND_REJECTED:
1896 complete_all(&arm_state->vc_suspend_complete);
1897 break;
1898 case VC_SUSPEND_FAILED:
1899 complete_all(&arm_state->vc_suspend_complete);
1900 arm_state->vc_resume_state = VC_RESUME_RESUMED;
1901 complete_all(&arm_state->vc_resume_complete);
1902 break;
1903 case VC_SUSPEND_IDLE:
1904 reinit_completion(&arm_state->vc_suspend_complete);
1905 break;
1906 case VC_SUSPEND_REQUESTED:
1907 break;
1908 case VC_SUSPEND_IN_PROGRESS:
1909 set_resume_state(arm_state, VC_RESUME_IDLE);
1910 break;
1911 case VC_SUSPEND_SUSPENDED:
1912 complete_all(&arm_state->vc_suspend_complete);
1913 break;
1914 default:
1915 BUG();
1916 break;
1917 }
1918}
1919
1920void
1921set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1922 enum vc_resume_status new_state)
1923{
1924 /* set the state in all cases */
1925 arm_state->vc_resume_state = new_state;
1926
1927 /* state specific additional actions */
1928 switch (new_state) {
1929 case VC_RESUME_FAILED:
1930 break;
1931 case VC_RESUME_IDLE:
1932 reinit_completion(&arm_state->vc_resume_complete);
1933 break;
1934 case VC_RESUME_REQUESTED:
1935 break;
1936 case VC_RESUME_IN_PROGRESS:
1937 break;
1938 case VC_RESUME_RESUMED:
1939 complete_all(&arm_state->vc_resume_complete);
1940 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1941 break;
1942 default:
1943 BUG();
1944 break;
1945 }
1946}
1947
1948
1949/* should be called with the write lock held */
1950inline void
1951start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1952{
1953 del_timer(&arm_state->suspend_timer);
1954 arm_state->suspend_timer.expires = jiffies +
1955 msecs_to_jiffies(arm_state->
1956 suspend_timer_timeout);
1957 add_timer(&arm_state->suspend_timer);
1958 arm_state->suspend_timer_running = 1;
1959}
1960
1961/* should be called with the write lock held */
1962static inline void
1963stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1964{
1965 if (arm_state->suspend_timer_running) {
1966 del_timer(&arm_state->suspend_timer);
1967 arm_state->suspend_timer_running = 0;
1968 }
1969}
1970
1971static inline int
1972need_resume(VCHIQ_STATE_T *state)
1973{
1974 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1975 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1976 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1977 vchiq_videocore_wanted(state);
1978}
1979
/* Prevent further resumes, so a forced suspend can proceed. Called with
 * the write lock held; the lock is temporarily DROPPED around each
 * interruptible wait and re-taken before returning or retrying.
 * Returns VCHIQ_SUCCESS once resume_blocked is set, or VCHIQ_ERROR if
 * waiting for previously blocked clients or an in-flight resume timed
 * out. */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		reinit_completion(&arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed" , __func__);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		/* Give up after the second time around - the state must be
		 * re-checked each iteration since the lock was dropped. */
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume" , __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}
	/* Arm the blocker before raising the flag so future resumers wait. */
	reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
2045
/* Allow resumes to proceed again - releases anything waiting on the
 * resume blocker and clears the flag. Pairs with block_resume(). */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
2052
2053/* Initiate suspend via slot handler. Should be called with the write lock
2054 * held */
/* Initiate suspend via the slot handler. Should be called with the write
 * lock held. If a suspend is already requested or in progress this is a
 * no-op; otherwise the state machine is moved to REQUESTED and the slot
 * handler is poked to carry out the suspend. Note the deliberate case
 * fall-throughs below. Returns VCHIQ_ERROR only when arm_state is NULL. */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2104
/* If a suspend has been requested and the videocore is currently
 * resumed, move the state machine to IN_PROGRESS (under the lock) and
 * then perform the platform suspend OUTSIDE the lock. */
void
vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int susp = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
			arm_state->vc_resume_state == VC_RESUME_RESUMED) {
		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
		susp = 1;
	}
	write_unlock_bh(&arm_state->susp_res_lock);

	/* The actual suspend must happen with the lock released. */
	if (susp)
		vchiq_platform_suspend(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}
2131
2132
/* Log why a wait for videocore suspend timed out: either the overall
 * use count is zero (so suspend should have happened), or the first
 * in-use service that is holding the videocore awake is named. */
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	char err[50] = "";
	int vc_use_count = arm_state->videocore_use_count;
	int active_services = state->unused_service;
	int i;

	if (!arm_state->videocore_use_count) {
		snprintf(err, sizeof(err), " Videocore usecount is 0");
		goto output_msg;
	}
	/* Report only the first culprit; "(+ more)" flags when its use
	 * count doesn't account for the whole videocore use count. */
	for (i = 0; i < active_services; i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];
		if (service_ptr && service_ptr->service_use_count &&
			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
			snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
					service_ptr->base.fourcc),
				service_ptr->client_id,
				service_ptr->service_use_count,
				service_ptr->service_use_count ==
					vc_use_count ? "" : " (+ more)");
			break;
		}
	}

output_msg:
	vchiq_log_error(vchiq_susp_log_level,
		"timed out waiting for vc suspend (%d).%s",
		arm_state->autosuspend_override, err);

}
2167
/* Try to get videocore into suspended state, regardless of autosuspend state.
** We don't actually force suspend, since videocore may get into a bad state
** if we force suspend at a bad time. Instead, we wait for autosuspend to
** determine a good point to suspend. If this doesn't happen within 100ms we
** report failure.
**
** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
*/
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;	/* -1 => the first timeout may be retried once */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Prevent a resume from racing with the suspend we're waiting for. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			/* Videocore still in use - let it go idle naturally
			 * and wait for the suspend below. */
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* Too many previous graceful attempts failed -
			 * suspend anyway. */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* Drop the lock while sleeping on the completion; retake it
		 * before inspecting the suspend state. */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			/* Genuine timeout - record it and report who is
			 * still holding videocore awake. */
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2287
2288void
2289vchiq_check_suspend(VCHIQ_STATE_T *state)
2290{
2291 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2292
2293 if (!arm_state)
2294 goto out;
2295
2296 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2297
2298 write_lock_bh(&arm_state->susp_res_lock);
2299 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2300 arm_state->first_connect &&
2301 !vchiq_videocore_wanted(state)) {
2302 vchiq_arm_vcsuspend(state);
2303 }
2304 write_unlock_bh(&arm_state->susp_res_lock);
2305
2306out:
2307 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2308 return;
2309}
2310
2311
/* Undo a previous resume block (see block_resume) and, if a resume is
 * needed, wait for videocore to come back.
 *
 * Returns 0 if videocore is running again, -1 if it remains suspended or
 * the wait was interrupted. */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	/* vchiq_check_resume requires the write lock held. */
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2354
2355/* This function should be called with the write lock held */
2356int
2357vchiq_check_resume(VCHIQ_STATE_T *state)
2358{
2359 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2360 int resume = 0;
2361
2362 if (!arm_state)
2363 goto out;
2364
2365 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2366
2367 if (need_resume(state)) {
2368 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2369 request_poll(state, NULL, 0);
2370 resume = 1;
2371 }
2372
2373out:
2374 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2375 return resume;
2376}
2377
2378void
2379vchiq_platform_check_resume(VCHIQ_STATE_T *state)
2380{
2381 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2382 int res = 0;
2383
2384 if (!arm_state)
2385 goto out;
2386
2387 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2388
2389 write_lock_bh(&arm_state->susp_res_lock);
2390 if (arm_state->wake_address == 0) {
2391 vchiq_log_info(vchiq_susp_log_level,
2392 "%s: already awake", __func__);
2393 goto unlock;
2394 }
2395 if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
2396 vchiq_log_info(vchiq_susp_log_level,
2397 "%s: already resuming", __func__);
2398 goto unlock;
2399 }
2400
2401 if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
2402 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
2403 res = 1;
2404 } else
2405 vchiq_log_trace(vchiq_susp_log_level,
2406 "%s: not resuming (resume state %s)", __func__,
2407 resume_state_names[arm_state->vc_resume_state +
2408 VC_RESUME_NUM_OFFSET]);
2409
2410unlock:
2411 write_unlock_bh(&arm_state->susp_res_lock);
2412
2413 if (res)
2414 vchiq_platform_resume(state);
2415
2416out:
2417 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2418 return;
2419
2420}
2421
2422
2423
2424VCHIQ_STATUS_T
2425vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2426 enum USE_TYPE_E use_type)
2427{
2428 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2429 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2430 char entity[16];
2431 int *entity_uc;
2432 int local_uc, local_entity_uc;
2433
2434 if (!arm_state)
2435 goto out;
2436
2437 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2438
2439 if (use_type == USE_TYPE_VCHIQ) {
2440 sprintf(entity, "VCHIQ: ");
2441 entity_uc = &arm_state->peer_use_count;
2442 } else if (service) {
2443 sprintf(entity, "%c%c%c%c:%03d",
2444 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2445 service->client_id);
2446 entity_uc = &service->service_use_count;
2447 } else {
2448 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2449 "ptr", __func__);
2450 ret = VCHIQ_ERROR;
2451 goto out;
2452 }
2453
2454 write_lock_bh(&arm_state->susp_res_lock);
2455 while (arm_state->resume_blocked) {
2456 /* If we call 'use' while force suspend is waiting for suspend,
2457 * then we're about to block the thread which the force is
2458 * waiting to complete, so we're bound to just time out. In this
2459 * case, set the suspend state such that the wait will be
2460 * canceled, so we can complete as quickly as possible. */
2461 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2462 VC_SUSPEND_IDLE) {
2463 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2464 break;
2465 }
2466 /* If suspend is already in progress then we need to block */
2467 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2468 /* Indicate that there are threads waiting on the resume
2469 * blocker. These need to be allowed to complete before
2470 * a _second_ call to force suspend can complete,
2471 * otherwise low priority threads might never actually
2472 * continue */
2473 arm_state->blocked_count++;
2474 write_unlock_bh(&arm_state->susp_res_lock);
2475 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2476 "blocked - waiting...", __func__, entity);
2477 if (wait_for_completion_killable(
2478 &arm_state->resume_blocker) != 0) {
2479 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2480 "wait for resume blocker interrupted",
2481 __func__, entity);
2482 ret = VCHIQ_ERROR;
2483 write_lock_bh(&arm_state->susp_res_lock);
2484 arm_state->blocked_count--;
2485 write_unlock_bh(&arm_state->susp_res_lock);
2486 goto out;
2487 }
2488 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2489 "unblocked", __func__, entity);
2490 write_lock_bh(&arm_state->susp_res_lock);
2491 if (--arm_state->blocked_count == 0)
2492 complete_all(&arm_state->blocked_blocker);
2493 }
2494 }
2495
2496 stop_suspend_timer(arm_state);
2497
2498 local_uc = ++arm_state->videocore_use_count;
2499 local_entity_uc = ++(*entity_uc);
2500
2501 /* If there's a pending request which hasn't yet been serviced then
2502 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2503 * vc_resume_complete will block until we either resume or fail to
2504 * suspend */
2505 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2506 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2507
2508 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2509 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2510 vchiq_log_info(vchiq_susp_log_level,
2511 "%s %s count %d, state count %d",
2512 __func__, entity, local_entity_uc, local_uc);
2513 request_poll(state, NULL, 0);
2514 } else
2515 vchiq_log_trace(vchiq_susp_log_level,
2516 "%s %s count %d, state count %d",
2517 __func__, entity, *entity_uc, local_uc);
2518
2519
2520 write_unlock_bh(&arm_state->susp_res_lock);
2521
2522 /* Completion is in a done state when we're not suspended, so this won't
2523 * block for the non-suspended case. */
2524 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2525 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2526 __func__, entity);
2527 if (wait_for_completion_killable(
2528 &arm_state->vc_resume_complete) != 0) {
2529 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2530 "resume interrupted", __func__, entity);
2531 ret = VCHIQ_ERROR;
2532 goto out;
2533 }
2534 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2535 entity);
2536 }
2537
2538 if (ret == VCHIQ_SUCCESS) {
2539 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2540 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2541 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2542 /* Send the use notify to videocore */
2543 status = vchiq_send_remote_use_active(state);
2544 if (status == VCHIQ_SUCCESS)
2545 ack_cnt--;
2546 else
2547 atomic_add(ack_cnt,
2548 &arm_state->ka_use_ack_count);
2549 }
2550 }
2551
2552out:
2553 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2554 return ret;
2555}
2556
2557VCHIQ_STATUS_T
2558vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2559{
2560 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2561 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2562 char entity[16];
2563 int *entity_uc;
2564 int local_uc, local_entity_uc;
2565
2566 if (!arm_state)
2567 goto out;
2568
2569 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2570
2571 if (service) {
2572 sprintf(entity, "%c%c%c%c:%03d",
2573 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2574 service->client_id);
2575 entity_uc = &service->service_use_count;
2576 } else {
2577 sprintf(entity, "PEER: ");
2578 entity_uc = &arm_state->peer_use_count;
2579 }
2580
2581 write_lock_bh(&arm_state->susp_res_lock);
2582 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2583 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2584 WARN_ON(!arm_state->videocore_use_count);
2585 WARN_ON(!(*entity_uc));
2586 ret = VCHIQ_ERROR;
2587 goto unlock;
2588 }
2589 local_uc = --arm_state->videocore_use_count;
2590 local_entity_uc = --(*entity_uc);
2591
2592 if (!vchiq_videocore_wanted(state)) {
2593 if (vchiq_platform_use_suspend_timer() &&
2594 !arm_state->resume_blocked) {
2595 /* Only use the timer if we're not trying to force
2596 * suspend (=> resume_blocked) */
2597 start_suspend_timer(arm_state);
2598 } else {
2599 vchiq_log_info(vchiq_susp_log_level,
2600 "%s %s count %d, state count %d - suspending",
2601 __func__, entity, *entity_uc,
2602 arm_state->videocore_use_count);
2603 vchiq_arm_vcsuspend(state);
2604 }
2605 } else
2606 vchiq_log_trace(vchiq_susp_log_level,
2607 "%s %s count %d, state count %d",
2608 __func__, entity, *entity_uc,
2609 arm_state->videocore_use_count);
2610
2611unlock:
2612 write_unlock_bh(&arm_state->susp_res_lock);
2613
2614out:
2615 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2616 return ret;
2617}
2618
2619void
2620vchiq_on_remote_use(VCHIQ_STATE_T *state)
2621{
2622 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2623 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2624 atomic_inc(&arm_state->ka_use_count);
2625 complete(&arm_state->ka_evt);
2626}
2627
2628void
2629vchiq_on_remote_release(VCHIQ_STATE_T *state)
2630{
2631 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2632 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2633 atomic_inc(&arm_state->ka_release_count);
2634 complete(&arm_state->ka_evt);
2635}
2636
2637VCHIQ_STATUS_T
2638vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
2639{
2640 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2641}
2642
2643VCHIQ_STATUS_T
2644vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
2645{
2646 return vchiq_release_internal(service->state, service);
2647}
2648
2649VCHIQ_DEBUGFS_NODE_T *
2650vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
2651{
2652 return &instance->debugfs_node;
2653}
2654
2655int
2656vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2657{
2658 VCHIQ_SERVICE_T *service;
2659 int use_count = 0, i;
2660 i = 0;
2661 while ((service = next_service_by_instance(instance->state,
2662 instance, &i)) != NULL) {
2663 use_count += service->service_use_count;
2664 unlock_service(service);
2665 }
2666 return use_count;
2667}
2668
2669int
2670vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
2671{
2672 return instance->pid;
2673}
2674
2675int
2676vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
2677{
2678 return instance->trace;
2679}
2680
2681void
2682vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2683{
2684 VCHIQ_SERVICE_T *service;
2685 int i;
2686 i = 0;
2687 while ((service = next_service_by_instance(instance->state,
2688 instance, &i)) != NULL) {
2689 service->trace = trace;
2690 unlock_service(service);
2691 }
2692 instance->trace = (trace != 0);
2693}
2694
2695static void suspend_timer_callback(unsigned long context)
2696{
2697 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2698 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2699 if (!arm_state)
2700 goto out;
2701 vchiq_log_info(vchiq_susp_log_level,
2702 "%s - suspend timer expired - check suspend", __func__);
2703 vchiq_check_suspend(state);
2704out:
2705 return;
2706}
2707
2708VCHIQ_STATUS_T
2709vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2710{
2711 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2712 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2713 if (service) {
2714 ret = vchiq_use_internal(service->state, service,
2715 USE_TYPE_SERVICE_NO_RESUME);
2716 unlock_service(service);
2717 }
2718 return ret;
2719}
2720
2721VCHIQ_STATUS_T
2722vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2723{
2724 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2725 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2726 if (service) {
2727 ret = vchiq_use_internal(service->state, service,
2728 USE_TYPE_SERVICE);
2729 unlock_service(service);
2730 }
2731 return ret;
2732}
2733
2734VCHIQ_STATUS_T
2735vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2736{
2737 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2738 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2739 if (service) {
2740 ret = vchiq_release_internal(service->state, service);
2741 unlock_service(service);
2742 }
2743 return ret;
2744}
2745
2746void
2747vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2748{
2749 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2750 int i, j = 0;
2751 /* Only dump 64 services */
2752 static const int local_max_services = 64;
2753 /* If there's more than 64 services, only dump ones with
2754 * non-zero counts */
2755 int only_nonzero = 0;
2756 static const char *nz = "<-- preventing suspend";
2757
2758 enum vc_suspend_status vc_suspend_state;
2759 enum vc_resume_status vc_resume_state;
2760 int peer_count;
2761 int vc_use_count;
2762 int active_services;
2763 struct service_data_struct {
2764 int fourcc;
2765 int clientid;
2766 int use_count;
2767 } service_data[local_max_services];
2768
2769 if (!arm_state)
2770 return;
2771
2772 read_lock_bh(&arm_state->susp_res_lock);
2773 vc_suspend_state = arm_state->vc_suspend_state;
2774 vc_resume_state = arm_state->vc_resume_state;
2775 peer_count = arm_state->peer_use_count;
2776 vc_use_count = arm_state->videocore_use_count;
2777 active_services = state->unused_service;
2778 if (active_services > local_max_services)
2779 only_nonzero = 1;
2780
2781 for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2782 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2783 if (!service_ptr)
2784 continue;
2785
2786 if (only_nonzero && !service_ptr->service_use_count)
2787 continue;
2788
2789 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2790 service_data[j].fourcc = service_ptr->base.fourcc;
2791 service_data[j].clientid = service_ptr->client_id;
2792 service_data[j++].use_count = service_ptr->
2793 service_use_count;
2794 }
2795 }
2796
2797 read_unlock_bh(&arm_state->susp_res_lock);
2798
2799 vchiq_log_warning(vchiq_susp_log_level,
2800 "-- Videcore suspend state: %s --",
2801 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2802 vchiq_log_warning(vchiq_susp_log_level,
2803 "-- Videcore resume state: %s --",
2804 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2805
2806 if (only_nonzero)
2807 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2808 "services (%d). Only dumping up to first %d services "
2809 "with non-zero use-count", active_services,
2810 local_max_services);
2811
2812 for (i = 0; i < j; i++) {
2813 vchiq_log_warning(vchiq_susp_log_level,
2814 "----- %c%c%c%c:%d service count %d %s",
2815 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2816 service_data[i].clientid,
2817 service_data[i].use_count,
2818 service_data[i].use_count ? nz : "");
2819 }
2820 vchiq_log_warning(vchiq_susp_log_level,
2821 "----- VCHIQ use count count %d", peer_count);
2822 vchiq_log_warning(vchiq_susp_log_level,
2823 "--- Overall vchiq instance use count %d", vc_use_count);
2824
2825 vchiq_dump_platform_use_state(state);
2826}
2827
/* Check that the caller holds at least one use count on the service.
 * Returns VCHIQ_SUCCESS when the count is non-zero; otherwise logs the
 * service and state counts and returns VCHIQ_ERROR. */
VCHIQ_STATUS_T
vchiq_check_service(VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state;
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		/* Dump diagnostics - calling into the service without a use
		 * count risks videocore being suspended underneath it. */
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d, videocore suspend state %s", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
2860
2861/* stub functions */
2862void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
2863{
2864 (void)state;
2865}
2866
/* Connection-state-change hook.  On the first transition to CONNECTED,
 * start the keepalive thread exactly once (guarded by first_connect under
 * susp_res_lock).  Note the lock is released *before* kthread_create in
 * the first-connect branch, since thread creation may sleep. */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[10];
			arm_state->first_connect = 1;
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
				state->id);
			arm_state->ka_thread = kthread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (IS_ERR(arm_state->ka_thread)) {
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}
2896
2897static int vchiq_probe(struct platform_device *pdev)
2898{
2899 struct device_node *fw_node;
2900 struct rpi_firmware *fw;
2901 int err;
2902 void *ptr_err;
2903
2904 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
71bad7f0 2905 if (!fw_node) {
2906 dev_err(&pdev->dev, "Missing firmware node\n");
2907 return -ENOENT;
2908 }
bea845af 2909
71bad7f0 2910 fw = rpi_firmware_get(fw_node);
d10543ec 2911 of_node_put(fw_node);
71bad7f0 2912 if (!fw)
2913 return -EPROBE_DEFER;
2914
2915 platform_set_drvdata(pdev, fw);
2916
9224c15c 2917 err = vchiq_platform_init(pdev, &g_state);
71bad7f0 2918 if (err != 0)
9224c15c 2919 goto failed_platform_init;
71bad7f0 2920
2921 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2922 if (err != 0) {
2923 vchiq_log_error(vchiq_arm_log_level,
2924 "Unable to allocate device number");
9224c15c 2925 goto failed_platform_init;
71bad7f0 2926 }
2927 cdev_init(&vchiq_cdev, &vchiq_fops);
2928 vchiq_cdev.owner = THIS_MODULE;
2929 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2930 if (err != 0) {
2931 vchiq_log_error(vchiq_arm_log_level,
2932 "Unable to register device");
2933 goto failed_cdev_add;
2934 }
2935
2936 /* create sysfs entries */
2937 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2938 ptr_err = vchiq_class;
2939 if (IS_ERR(ptr_err))
2940 goto failed_class_create;
2941
2942 vchiq_dev = device_create(vchiq_class, NULL,
2943 vchiq_devid, NULL, "vchiq");
2944 ptr_err = vchiq_dev;
2945 if (IS_ERR(ptr_err))
2946 goto failed_device_create;
2947
9224c15c
SW
2948 /* create debugfs entries */
2949 err = vchiq_debugfs_init();
71bad7f0 2950 if (err != 0)
9224c15c 2951 goto failed_debugfs_init;
71bad7f0 2952
2953 vchiq_log_info(vchiq_arm_log_level,
2954 "vchiq: initialised - version %d (min %d), device %d.%d",
2955 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2956 MAJOR(vchiq_devid), MINOR(vchiq_devid));
2957
2958 return 0;
2959
9224c15c 2960failed_debugfs_init:
71bad7f0 2961 device_destroy(vchiq_class, vchiq_devid);
2962failed_device_create:
2963 class_destroy(vchiq_class);
2964failed_class_create:
2965 cdev_del(&vchiq_cdev);
2966 err = PTR_ERR(ptr_err);
2967failed_cdev_add:
2968 unregister_chrdev_region(vchiq_devid, 1);
9224c15c 2969failed_platform_init:
71bad7f0 2970 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2971 return err;
2972}
2973
2974static int vchiq_remove(struct platform_device *pdev)
2975{
0ece01c6 2976 vchiq_debugfs_deinit();
71bad7f0 2977 device_destroy(vchiq_class, vchiq_devid);
2978 class_destroy(vchiq_class);
2979 cdev_del(&vchiq_cdev);
2980 unregister_chrdev_region(vchiq_devid, 1);
2981
2982 return 0;
2983}
2984
/* Devicetree match table - binds this driver to "brcm,bcm2835-vchiq"
 * nodes. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
/* Registers the driver and generates the module init/exit boilerplate. */
module_platform_driver(vchiq_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");