/*
 * Mirrored from git.proxmox.com (mirror_ubuntu-artful-kernel.git):
 * drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
 */
1 /**
2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/sched/signal.h>
38 #include <linux/types.h>
39 #include <linux/errno.h>
40 #include <linux/cdev.h>
41 #include <linux/fs.h>
42 #include <linux/device.h>
43 #include <linux/mm.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/bug.h>
47 #include <linux/semaphore.h>
48 #include <linux/list.h>
49 #include <linux/of.h>
50 #include <linux/platform_device.h>
51 #include <soc/bcm2835/raspberrypi-firmware.h>
52
53 #include "vchiq_core.h"
54 #include "vchiq_ioctl.h"
55 #include "vchiq_arm.h"
56 #include "vchiq_debugfs.h"
57 #include "vchiq_killable.h"
58
59 #define DEVICE_NAME "vchiq"
60
61 /* Override the default prefix, which would be vchiq_arm (from the filename) */
62 #undef MODULE_PARAM_PREFIX
63 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
64
65 #define VCHIQ_MINOR 0
66
67 /* Some per-instance constants */
68 #define MAX_COMPLETIONS 128
69 #define MAX_SERVICES 64
70 #define MAX_ELEMENTS 8
71 #define MSG_QUEUE_SIZE 128
72
73 #define KEEPALIVE_VER 1
74 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
75
76 /* Run time control of log level, based on KERN_XXX level. */
77 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
78 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
79
80 #define SUSPEND_TIMER_TIMEOUT_MS 100
81 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
82
83 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
84 static const char *const suspend_state_names[] = {
85 "VC_SUSPEND_FORCE_CANCELED",
86 "VC_SUSPEND_REJECTED",
87 "VC_SUSPEND_FAILED",
88 "VC_SUSPEND_IDLE",
89 "VC_SUSPEND_REQUESTED",
90 "VC_SUSPEND_IN_PROGRESS",
91 "VC_SUSPEND_SUSPENDED"
92 };
93 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
94 static const char *const resume_state_names[] = {
95 "VC_RESUME_FAILED",
96 "VC_RESUME_IDLE",
97 "VC_RESUME_REQUESTED",
98 "VC_RESUME_IN_PROGRESS",
99 "VC_RESUME_RESUMED"
100 };
101 /* The number of times we allow force suspend to timeout before actually
102 ** _forcing_ suspend. This is to cater for SW which fails to release vchiq
103 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
104 */
105 #define FORCE_SUSPEND_FAIL_MAX 8
106
107 /* The time in ms allowed for videocore to go idle when force suspend has been
108 * requested */
109 #define FORCE_SUSPEND_TIMEOUT_MS 200
110
111
112 static void suspend_timer_callback(unsigned long context);
113
114
/* Per-service driver state for a service created via the CREATE_SERVICE
 * ioctl; installed as the service's base.userdata and freed by
 * user_service_free(). */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;
	void *userdata;		/* the caller's original userdata pointer */
	VCHIQ_INSTANCE_T instance;
	char is_vchi;		/* non-zero: messages are routed via msg_queue */
	char dequeue_pending;	/* a DEQUEUE_MESSAGE is blocked waiting for a message */
	char close_pending;	/* CLOSED queued; cleared by CLOSE_DELIVERED ioctl */
	int message_available_pos;	/* completion index of last MESSAGE_AVAILABLE */
	int msg_insert;		/* free-running insert index into msg_queue */
	int msg_remove;		/* free-running remove index into msg_queue */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	struct semaphore close_event;	/* signalled by close_delivered() */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
} USER_SERVICE_T;
130
/* Record of a blocking bulk transfer interrupted by a signal; parked on
 * the instance's bulk_waiter_list so a later VCHIQ_BULK_MODE_WAITING
 * request from the same pid can reclaim and complete it. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;		/* pid of the thread that queued the transfer */
	struct list_head list;	/* entry in instance->bulk_waiter_list */
};
136
/* Per-open-file state for the /dev/vchiq character device. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;

	/* Circular buffer of completion records delivered to user space by
	 * the AWAIT_COMPLETION ioctl.  The insert/remove indices are
	 * free-running and masked with (MAX_COMPLETIONS - 1) on access. */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when completions are consumed */
	struct mutex completion_mutex;

	int connected;		/* set once the CONNECT ioctl succeeds */
	int closing;		/* set by SHUTDOWN to unwind blocked waiters */
	int pid;
	int mark;		/* NOTE(review): not referenced in this chunk */
	int use_close_delivered;	/* set by LIB_VERSION ioctl when the client
					 * library supports CLOSE_DELIVERED */
	int trace;

	/* Interrupted blocking bulk transfers awaiting a WAITING-mode retry */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};
158
/* Cursor state for dumping driver state into a user-space buffer.
 * NOTE(review): the dump helpers that consume this live outside this
 * chunk; field meanings below are inferred from the names - confirm. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* presumably bytes produced so far */
	size_t space;		/* presumably bytes remaining in buf */
	loff_t offset;
} DUMP_CONTEXT_T;
165
166 static struct cdev vchiq_cdev;
167 static dev_t vchiq_devid;
168 static VCHIQ_STATE_T g_state;
169 static struct class *vchiq_class;
170 static struct device *vchiq_dev;
171 static DEFINE_SPINLOCK(msg_queue_spinlock);
172
173 static const char *const ioctl_names[] = {
174 "CONNECT",
175 "SHUTDOWN",
176 "CREATE_SERVICE",
177 "REMOVE_SERVICE",
178 "QUEUE_MESSAGE",
179 "QUEUE_BULK_TRANSMIT",
180 "QUEUE_BULK_RECEIVE",
181 "AWAIT_COMPLETION",
182 "DEQUEUE_MESSAGE",
183 "GET_CLIENT_ID",
184 "GET_CONFIG",
185 "CLOSE_SERVICE",
186 "USE_SERVICE",
187 "RELEASE_SERVICE",
188 "SET_SERVICE_OPTION",
189 "DUMP_PHYS_MEM",
190 "LIB_VERSION",
191 "CLOSE_DELIVERED"
192 };
193
194 vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
195 (VCHIQ_IOC_MAX + 1));
196
197 static void
198 dump_phys_mem(void *virt_addr, u32 num_bytes);
199
200 /****************************************************************************
201 *
202 * add_completion
203 *
204 ***************************************************************************/
205
/*
 * Append a completion record to @instance's circular completion buffer,
 * for later delivery to user space via the AWAIT_COMPLETION ioctl.
 * Blocks interruptibly while the buffer is full.
 *
 * Returns VCHIQ_RETRY if the wait for space is interrupted by a signal,
 * otherwise VCHIQ_SUCCESS (including when the instance is closing, in
 * which case the record is silently dropped).
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	int insert;
	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Indices are free-running; masking maps them into the
	   power-of-two sized ring. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
	   record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
264
265 /****************************************************************************
266 *
267 * service_callback
268 *
269 ***************************************************************************/
270
/*
 * Callback installed on every user-created service.  For VCHI-style
 * services a message is parked in the per-service msg_queue (blocking
 * while that queue is full); otherwise, or additionally, a completion
 * record is queued via add_completion().
 *
 * Returns VCHIQ_SUCCESS normally, VCHIQ_RETRY if a wait is interrupted
 * by a signal, VCHIQ_ERROR if the instance starts closing while waiting
 * for queue space.
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Nobody left to deliver to - drop the callback silently. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Queue full: drop the lock before sleeping, and
			   retake it before re-testing the condition. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		up(&user_service->insert_event);

		/* The message now lives in msg_queue; NULL header so any
		   completion queued below carries no payload. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
375
376 /****************************************************************************
377 *
378 * user_service_free
379 *
380 ***************************************************************************/
381 static void
382 user_service_free(void *userdata)
383 {
384 kfree(userdata);
385 }
386
387 /****************************************************************************
388 *
389 * close_delivered
390 *
391 ***************************************************************************/
392 static void close_delivered(USER_SERVICE_T *user_service)
393 {
394 vchiq_log_info(vchiq_arm_log_level,
395 "close_delivered(handle=%x)",
396 user_service->service->handle);
397
398 if (user_service->close_pending) {
399 /* Allow the underlying service to be culled */
400 unlock_service(user_service->service);
401
402 /* Wake the user-thread blocked in close_ or remove_service */
403 up(&user_service->close_event);
404
405 user_service->close_pending = 0;
406 }
407 }
408
/* Iteration state threaded through vchiq_ioc_copy_element_data() while
 * gathering a message from an array of user-supplied elements. */
struct vchiq_io_copy_callback_context {
	VCHIQ_ELEMENT_T *current_element;	/* element being copied from */
	size_t current_element_offset;	/* bytes already taken from it */
	unsigned long elements_to_go;	/* elements not yet fully consumed */
	size_t current_offset;		/* total bytes copied so far */
};
415
/*
 * Copy callback handed to vchiq_queue_message() by
 * vchiq_ioc_queue_message(): supplies up to @maxsize bytes of payload
 * into @dest from the user-space element array tracked in @context.
 *
 * Returns the number of bytes copied this call, 0 when all elements are
 * exhausted (or when called with a non-sequential @offset), or -EFAULT
 * if the user-space copy faults.
 */
static ssize_t
vchiq_ioc_copy_element_data(
	void *context,
	void *dest,
	size_t offset,
	size_t maxsize)
{
	long res;
	size_t bytes_this_round;
	struct vchiq_io_copy_callback_context *copy_context =
		(struct vchiq_io_copy_callback_context *)context;

	/* Only strictly sequential offsets are supported. */
	if (offset != copy_context->current_offset)
		return 0;

	if (!copy_context->elements_to_go)
		return 0;

	/*
	 * Complex logic here to handle the case of 0 size elements
	 * in the middle of the array of elements.
	 *
	 * Need to skip over these 0 size elements.
	 */
	while (1) {
		bytes_this_round = min(copy_context->current_element->size -
				       copy_context->current_element_offset,
				       maxsize);

		if (bytes_this_round)
			break;

		/* Current element exhausted - advance to the next one. */
		copy_context->elements_to_go--;
		copy_context->current_element++;
		copy_context->current_element_offset = 0;

		if (!copy_context->elements_to_go)
			return 0;
	}

	res = copy_from_user(dest,
			     copy_context->current_element->data +
			     copy_context->current_element_offset,
			     bytes_this_round);

	if (res != 0)
		return -EFAULT;

	copy_context->current_element_offset += bytes_this_round;
	copy_context->current_offset += bytes_this_round;

	/*
	 * Check if done with current element, and if so advance to the next.
	 */
	if (copy_context->current_element_offset ==
	    copy_context->current_element->size) {
		copy_context->elements_to_go--;
		copy_context->current_element++;
		copy_context->current_element_offset = 0;
	}

	return bytes_this_round;
}
479
480 /**************************************************************************
481 *
482 * vchiq_ioc_queue_message
483 *
484 **************************************************************************/
485 static VCHIQ_STATUS_T
486 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
487 VCHIQ_ELEMENT_T *elements,
488 unsigned long count)
489 {
490 struct vchiq_io_copy_callback_context context;
491 unsigned long i;
492 size_t total_size = 0;
493
494 context.current_element = elements;
495 context.current_element_offset = 0;
496 context.elements_to_go = count;
497 context.current_offset = 0;
498
499 for (i = 0; i < count; i++) {
500 if (!elements[i].data && elements[i].size != 0)
501 return -EFAULT;
502
503 total_size += elements[i].size;
504 }
505
506 return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
507 &context, total_size);
508 }
509
510 /****************************************************************************
511 *
512 * vchiq_ioctl
513 *
514 ***************************************************************************/
515 static long
516 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
517 {
518 VCHIQ_INSTANCE_T instance = file->private_data;
519 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
520 VCHIQ_SERVICE_T *service = NULL;
521 long ret = 0;
522 int i, rc;
523 DEBUG_INITIALISE(g_state.local)
524
525 vchiq_log_trace(vchiq_arm_log_level,
526 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
527 instance,
528 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
529 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
530 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
531
532 switch (cmd) {
533 case VCHIQ_IOC_SHUTDOWN:
534 if (!instance->connected)
535 break;
536
537 /* Remove all services */
538 i = 0;
539 while ((service = next_service_by_instance(instance->state,
540 instance, &i)) != NULL) {
541 status = vchiq_remove_service(service->handle);
542 unlock_service(service);
543 if (status != VCHIQ_SUCCESS)
544 break;
545 }
546 service = NULL;
547
548 if (status == VCHIQ_SUCCESS) {
549 /* Wake the completion thread and ask it to exit */
550 instance->closing = 1;
551 up(&instance->insert_event);
552 }
553
554 break;
555
556 case VCHIQ_IOC_CONNECT:
557 if (instance->connected) {
558 ret = -EINVAL;
559 break;
560 }
561 rc = mutex_lock_killable(&instance->state->mutex);
562 if (rc != 0) {
563 vchiq_log_error(vchiq_arm_log_level,
564 "vchiq: connect: could not lock mutex for "
565 "state %d: %d",
566 instance->state->id, rc);
567 ret = -EINTR;
568 break;
569 }
570 status = vchiq_connect_internal(instance->state, instance);
571 mutex_unlock(&instance->state->mutex);
572
573 if (status == VCHIQ_SUCCESS)
574 instance->connected = 1;
575 else
576 vchiq_log_error(vchiq_arm_log_level,
577 "vchiq: could not connect: %d", status);
578 break;
579
580 case VCHIQ_IOC_CREATE_SERVICE: {
581 VCHIQ_CREATE_SERVICE_T args;
582 USER_SERVICE_T *user_service = NULL;
583 void *userdata;
584 int srvstate;
585
586 if (copy_from_user
587 (&args, (const void __user *)arg,
588 sizeof(args)) != 0) {
589 ret = -EFAULT;
590 break;
591 }
592
593 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
594 if (!user_service) {
595 ret = -ENOMEM;
596 break;
597 }
598
599 if (args.is_open) {
600 if (!instance->connected) {
601 ret = -ENOTCONN;
602 kfree(user_service);
603 break;
604 }
605 srvstate = VCHIQ_SRVSTATE_OPENING;
606 } else {
607 srvstate =
608 instance->connected ?
609 VCHIQ_SRVSTATE_LISTENING :
610 VCHIQ_SRVSTATE_HIDDEN;
611 }
612
613 userdata = args.params.userdata;
614 args.params.callback = service_callback;
615 args.params.userdata = user_service;
616 service = vchiq_add_service_internal(
617 instance->state,
618 &args.params, srvstate,
619 instance, user_service_free);
620
621 if (service != NULL) {
622 user_service->service = service;
623 user_service->userdata = userdata;
624 user_service->instance = instance;
625 user_service->is_vchi = (args.is_vchi != 0);
626 user_service->dequeue_pending = 0;
627 user_service->close_pending = 0;
628 user_service->message_available_pos =
629 instance->completion_remove - 1;
630 user_service->msg_insert = 0;
631 user_service->msg_remove = 0;
632 sema_init(&user_service->insert_event, 0);
633 sema_init(&user_service->remove_event, 0);
634 sema_init(&user_service->close_event, 0);
635
636 if (args.is_open) {
637 status = vchiq_open_service_internal
638 (service, instance->pid);
639 if (status != VCHIQ_SUCCESS) {
640 vchiq_remove_service(service->handle);
641 service = NULL;
642 ret = (status == VCHIQ_RETRY) ?
643 -EINTR : -EIO;
644 break;
645 }
646 }
647
648 if (copy_to_user((void __user *)
649 &(((VCHIQ_CREATE_SERVICE_T __user *)
650 arg)->handle),
651 (const void *)&service->handle,
652 sizeof(service->handle)) != 0) {
653 ret = -EFAULT;
654 vchiq_remove_service(service->handle);
655 }
656
657 service = NULL;
658 } else {
659 ret = -EEXIST;
660 kfree(user_service);
661 }
662 } break;
663
664 case VCHIQ_IOC_CLOSE_SERVICE: {
665 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
666
667 service = find_service_for_instance(instance, handle);
668 if (service != NULL) {
669 USER_SERVICE_T *user_service =
670 (USER_SERVICE_T *)service->base.userdata;
671 /* close_pending is false on first entry, and when the
672 wait in vchiq_close_service has been interrupted. */
673 if (!user_service->close_pending) {
674 status = vchiq_close_service(service->handle);
675 if (status != VCHIQ_SUCCESS)
676 break;
677 }
678
679 /* close_pending is true once the underlying service
680 has been closed until the client library calls the
681 CLOSE_DELIVERED ioctl, signalling close_event. */
682 if (user_service->close_pending &&
683 down_interruptible(&user_service->close_event))
684 status = VCHIQ_RETRY;
685 }
686 else
687 ret = -EINVAL;
688 } break;
689
690 case VCHIQ_IOC_REMOVE_SERVICE: {
691 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
692
693 service = find_service_for_instance(instance, handle);
694 if (service != NULL) {
695 USER_SERVICE_T *user_service =
696 (USER_SERVICE_T *)service->base.userdata;
697 /* close_pending is false on first entry, and when the
698 wait in vchiq_close_service has been interrupted. */
699 if (!user_service->close_pending) {
700 status = vchiq_remove_service(service->handle);
701 if (status != VCHIQ_SUCCESS)
702 break;
703 }
704
705 /* close_pending is true once the underlying service
706 has been closed until the client library calls the
707 CLOSE_DELIVERED ioctl, signalling close_event. */
708 if (user_service->close_pending &&
709 down_interruptible(&user_service->close_event))
710 status = VCHIQ_RETRY;
711 }
712 else
713 ret = -EINVAL;
714 } break;
715
716 case VCHIQ_IOC_USE_SERVICE:
717 case VCHIQ_IOC_RELEASE_SERVICE: {
718 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
719
720 service = find_service_for_instance(instance, handle);
721 if (service != NULL) {
722 status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
723 vchiq_use_service_internal(service) :
724 vchiq_release_service_internal(service);
725 if (status != VCHIQ_SUCCESS) {
726 vchiq_log_error(vchiq_susp_log_level,
727 "%s: cmd %s returned error %d for "
728 "service %c%c%c%c:%03d",
729 __func__,
730 (cmd == VCHIQ_IOC_USE_SERVICE) ?
731 "VCHIQ_IOC_USE_SERVICE" :
732 "VCHIQ_IOC_RELEASE_SERVICE",
733 status,
734 VCHIQ_FOURCC_AS_4CHARS(
735 service->base.fourcc),
736 service->client_id);
737 ret = -EINVAL;
738 }
739 } else
740 ret = -EINVAL;
741 } break;
742
743 case VCHIQ_IOC_QUEUE_MESSAGE: {
744 VCHIQ_QUEUE_MESSAGE_T args;
745 if (copy_from_user
746 (&args, (const void __user *)arg,
747 sizeof(args)) != 0) {
748 ret = -EFAULT;
749 break;
750 }
751
752 service = find_service_for_instance(instance, args.handle);
753
754 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
755 /* Copy elements into kernel space */
756 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
757 if (copy_from_user(elements, args.elements,
758 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
759 status = vchiq_ioc_queue_message
760 (args.handle,
761 elements, args.count);
762 else
763 ret = -EFAULT;
764 } else {
765 ret = -EINVAL;
766 }
767 } break;
768
769 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
770 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
771 VCHIQ_QUEUE_BULK_TRANSFER_T args;
772 struct bulk_waiter_node *waiter = NULL;
773 VCHIQ_BULK_DIR_T dir =
774 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
775 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
776
777 if (copy_from_user
778 (&args, (const void __user *)arg,
779 sizeof(args)) != 0) {
780 ret = -EFAULT;
781 break;
782 }
783
784 service = find_service_for_instance(instance, args.handle);
785 if (!service) {
786 ret = -EINVAL;
787 break;
788 }
789
790 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
791 waiter = kzalloc(sizeof(struct bulk_waiter_node),
792 GFP_KERNEL);
793 if (!waiter) {
794 ret = -ENOMEM;
795 break;
796 }
797 args.userdata = &waiter->bulk_waiter;
798 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
799 struct list_head *pos;
800 mutex_lock(&instance->bulk_waiter_list_mutex);
801 list_for_each(pos, &instance->bulk_waiter_list) {
802 if (list_entry(pos, struct bulk_waiter_node,
803 list)->pid == current->pid) {
804 waiter = list_entry(pos,
805 struct bulk_waiter_node,
806 list);
807 list_del(pos);
808 break;
809 }
810
811 }
812 mutex_unlock(&instance->bulk_waiter_list_mutex);
813 if (!waiter) {
814 vchiq_log_error(vchiq_arm_log_level,
815 "no bulk_waiter found for pid %d",
816 current->pid);
817 ret = -ESRCH;
818 break;
819 }
820 vchiq_log_info(vchiq_arm_log_level,
821 "found bulk_waiter %pK for pid %d", waiter,
822 current->pid);
823 args.userdata = &waiter->bulk_waiter;
824 }
825 status = vchiq_bulk_transfer
826 (args.handle,
827 VCHI_MEM_HANDLE_INVALID,
828 args.data, args.size,
829 args.userdata, args.mode,
830 dir);
831 if (!waiter)
832 break;
833 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
834 !waiter->bulk_waiter.bulk) {
835 if (waiter->bulk_waiter.bulk) {
836 /* Cancel the signal when the transfer
837 ** completes. */
838 spin_lock(&bulk_waiter_spinlock);
839 waiter->bulk_waiter.bulk->userdata = NULL;
840 spin_unlock(&bulk_waiter_spinlock);
841 }
842 kfree(waiter);
843 } else {
844 const VCHIQ_BULK_MODE_T mode_waiting =
845 VCHIQ_BULK_MODE_WAITING;
846 waiter->pid = current->pid;
847 mutex_lock(&instance->bulk_waiter_list_mutex);
848 list_add(&waiter->list, &instance->bulk_waiter_list);
849 mutex_unlock(&instance->bulk_waiter_list_mutex);
850 vchiq_log_info(vchiq_arm_log_level,
851 "saved bulk_waiter %pK for pid %d",
852 waiter, current->pid);
853
854 if (copy_to_user((void __user *)
855 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
856 arg)->mode),
857 (const void *)&mode_waiting,
858 sizeof(mode_waiting)) != 0)
859 ret = -EFAULT;
860 }
861 } break;
862
863 case VCHIQ_IOC_AWAIT_COMPLETION: {
864 VCHIQ_AWAIT_COMPLETION_T args;
865
866 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
867 if (!instance->connected) {
868 ret = -ENOTCONN;
869 break;
870 }
871
872 if (copy_from_user(&args, (const void __user *)arg,
873 sizeof(args)) != 0) {
874 ret = -EFAULT;
875 break;
876 }
877
878 mutex_lock(&instance->completion_mutex);
879
880 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
881 while ((instance->completion_remove ==
882 instance->completion_insert)
883 && !instance->closing) {
884 int rc;
885 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
886 mutex_unlock(&instance->completion_mutex);
887 rc = down_interruptible(&instance->insert_event);
888 mutex_lock(&instance->completion_mutex);
889 if (rc != 0) {
890 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
891 vchiq_log_info(vchiq_arm_log_level,
892 "AWAIT_COMPLETION interrupted");
893 ret = -EINTR;
894 break;
895 }
896 }
897 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
898
899 if (ret == 0) {
900 int msgbufcount = args.msgbufcount;
901 int remove = instance->completion_remove;
902
903 for (ret = 0; ret < args.count; ret++) {
904 VCHIQ_COMPLETION_DATA_T *completion;
905 VCHIQ_SERVICE_T *service;
906 USER_SERVICE_T *user_service;
907 VCHIQ_HEADER_T *header;
908
909 if (remove == instance->completion_insert)
910 break;
911
912 completion = &instance->completions[
913 remove & (MAX_COMPLETIONS - 1)];
914
915 /*
916 * A read memory barrier is needed to stop
917 * prefetch of a stale completion record
918 */
919 rmb();
920
921 service = completion->service_userdata;
922 user_service = service->base.userdata;
923 completion->service_userdata =
924 user_service->userdata;
925
926 header = completion->header;
927 if (header) {
928 void __user *msgbuf;
929 int msglen;
930
931 msglen = header->size +
932 sizeof(VCHIQ_HEADER_T);
933 /* This must be a VCHIQ-style service */
934 if (args.msgbufsize < msglen) {
935 vchiq_log_error(
936 vchiq_arm_log_level,
937 "header %pK: msgbufsize %x < msglen %x",
938 header, args.msgbufsize,
939 msglen);
940 WARN(1, "invalid message "
941 "size\n");
942 if (ret == 0)
943 ret = -EMSGSIZE;
944 break;
945 }
946 if (msgbufcount <= 0)
947 /* Stall here for lack of a
948 ** buffer for the message. */
949 break;
950 /* Get the pointer from user space */
951 msgbufcount--;
952 if (copy_from_user(&msgbuf,
953 (const void __user *)
954 &args.msgbufs[msgbufcount],
955 sizeof(msgbuf)) != 0) {
956 if (ret == 0)
957 ret = -EFAULT;
958 break;
959 }
960
961 /* Copy the message to user space */
962 if (copy_to_user(msgbuf, header,
963 msglen) != 0) {
964 if (ret == 0)
965 ret = -EFAULT;
966 break;
967 }
968
969 /* Now it has been copied, the message
970 ** can be released. */
971 vchiq_release_message(service->handle,
972 header);
973
974 /* The completion must point to the
975 ** msgbuf. */
976 completion->header = msgbuf;
977 }
978
979 if ((completion->reason ==
980 VCHIQ_SERVICE_CLOSED) &&
981 !instance->use_close_delivered)
982 unlock_service(service);
983
984 if (copy_to_user((void __user *)(
985 (size_t)args.buf +
986 ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
987 completion,
988 sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
989 if (ret == 0)
990 ret = -EFAULT;
991 break;
992 }
993
994 /*
995 * Ensure that the above copy has completed
996 * before advancing the remove pointer.
997 */
998 mb();
999 remove++;
1000 instance->completion_remove = remove;
1001 }
1002
1003 if (msgbufcount != args.msgbufcount) {
1004 if (copy_to_user((void __user *)
1005 &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
1006 msgbufcount,
1007 &msgbufcount,
1008 sizeof(msgbufcount)) != 0) {
1009 ret = -EFAULT;
1010 }
1011 }
1012 }
1013
1014 if (ret != 0)
1015 up(&instance->remove_event);
1016 mutex_unlock(&instance->completion_mutex);
1017 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1018 } break;
1019
1020 case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1021 VCHIQ_DEQUEUE_MESSAGE_T args;
1022 USER_SERVICE_T *user_service;
1023 VCHIQ_HEADER_T *header;
1024
1025 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1026 if (copy_from_user
1027 (&args, (const void __user *)arg,
1028 sizeof(args)) != 0) {
1029 ret = -EFAULT;
1030 break;
1031 }
1032 service = find_service_for_instance(instance, args.handle);
1033 if (!service) {
1034 ret = -EINVAL;
1035 break;
1036 }
1037 user_service = (USER_SERVICE_T *)service->base.userdata;
1038 if (user_service->is_vchi == 0) {
1039 ret = -EINVAL;
1040 break;
1041 }
1042
1043 spin_lock(&msg_queue_spinlock);
1044 if (user_service->msg_remove == user_service->msg_insert) {
1045 if (!args.blocking) {
1046 spin_unlock(&msg_queue_spinlock);
1047 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1048 ret = -EWOULDBLOCK;
1049 break;
1050 }
1051 user_service->dequeue_pending = 1;
1052 do {
1053 spin_unlock(&msg_queue_spinlock);
1054 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1055 if (down_interruptible(
1056 &user_service->insert_event) != 0) {
1057 vchiq_log_info(vchiq_arm_log_level,
1058 "DEQUEUE_MESSAGE interrupted");
1059 ret = -EINTR;
1060 break;
1061 }
1062 spin_lock(&msg_queue_spinlock);
1063 } while (user_service->msg_remove ==
1064 user_service->msg_insert);
1065
1066 if (ret)
1067 break;
1068 }
1069
1070 BUG_ON((int)(user_service->msg_insert -
1071 user_service->msg_remove) < 0);
1072
1073 header = user_service->msg_queue[user_service->msg_remove &
1074 (MSG_QUEUE_SIZE - 1)];
1075 user_service->msg_remove++;
1076 spin_unlock(&msg_queue_spinlock);
1077
1078 up(&user_service->remove_event);
1079 if (header == NULL)
1080 ret = -ENOTCONN;
1081 else if (header->size <= args.bufsize) {
1082 /* Copy to user space if msgbuf is not NULL */
1083 if ((args.buf == NULL) ||
1084 (copy_to_user((void __user *)args.buf,
1085 header->data,
1086 header->size) == 0)) {
1087 ret = header->size;
1088 vchiq_release_message(
1089 service->handle,
1090 header);
1091 } else
1092 ret = -EFAULT;
1093 } else {
1094 vchiq_log_error(vchiq_arm_log_level,
1095 "header %pK: bufsize %x < size %x",
1096 header, args.bufsize, header->size);
1097 WARN(1, "invalid size\n");
1098 ret = -EMSGSIZE;
1099 }
1100 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1101 } break;
1102
1103 case VCHIQ_IOC_GET_CLIENT_ID: {
1104 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1105
1106 ret = vchiq_get_client_id(handle);
1107 } break;
1108
1109 case VCHIQ_IOC_GET_CONFIG: {
1110 VCHIQ_GET_CONFIG_T args;
1111 VCHIQ_CONFIG_T config;
1112
1113 if (copy_from_user(&args, (const void __user *)arg,
1114 sizeof(args)) != 0) {
1115 ret = -EFAULT;
1116 break;
1117 }
1118 if (args.config_size > sizeof(config)) {
1119 ret = -EINVAL;
1120 break;
1121 }
1122 status = vchiq_get_config(instance, args.config_size, &config);
1123 if (status == VCHIQ_SUCCESS) {
1124 if (copy_to_user((void __user *)args.pconfig,
1125 &config, args.config_size) != 0) {
1126 ret = -EFAULT;
1127 break;
1128 }
1129 }
1130 } break;
1131
1132 case VCHIQ_IOC_SET_SERVICE_OPTION: {
1133 VCHIQ_SET_SERVICE_OPTION_T args;
1134
1135 if (copy_from_user(
1136 &args, (const void __user *)arg,
1137 sizeof(args)) != 0) {
1138 ret = -EFAULT;
1139 break;
1140 }
1141
1142 service = find_service_for_instance(instance, args.handle);
1143 if (!service) {
1144 ret = -EINVAL;
1145 break;
1146 }
1147
1148 status = vchiq_set_service_option(
1149 args.handle, args.option, args.value);
1150 } break;
1151
1152 case VCHIQ_IOC_DUMP_PHYS_MEM: {
1153 VCHIQ_DUMP_MEM_T args;
1154
1155 if (copy_from_user
1156 (&args, (const void __user *)arg,
1157 sizeof(args)) != 0) {
1158 ret = -EFAULT;
1159 break;
1160 }
1161 dump_phys_mem(args.virt_addr, args.num_bytes);
1162 } break;
1163
1164 case VCHIQ_IOC_LIB_VERSION: {
1165 unsigned int lib_version = (unsigned int)arg;
1166
1167 if (lib_version < VCHIQ_VERSION_MIN)
1168 ret = -EINVAL;
1169 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1170 instance->use_close_delivered = 1;
1171 } break;
1172
1173 case VCHIQ_IOC_CLOSE_DELIVERED: {
1174 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1175
1176 service = find_closed_service_for_instance(instance, handle);
1177 if (service != NULL) {
1178 USER_SERVICE_T *user_service =
1179 (USER_SERVICE_T *)service->base.userdata;
1180 close_delivered(user_service);
1181 }
1182 else
1183 ret = -EINVAL;
1184 } break;
1185
1186 default:
1187 ret = -ENOTTY;
1188 break;
1189 }
1190
1191 if (service)
1192 unlock_service(service);
1193
1194 if (ret == 0) {
1195 if (status == VCHIQ_ERROR)
1196 ret = -EIO;
1197 else if (status == VCHIQ_RETRY)
1198 ret = -EINTR;
1199 }
1200
1201 if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1202 (ret != -EWOULDBLOCK))
1203 vchiq_log_info(vchiq_arm_log_level,
1204 " ioctl instance %lx, cmd %s -> status %d, %ld",
1205 (unsigned long)instance,
1206 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1207 ioctl_names[_IOC_NR(cmd)] :
1208 "<invalid>",
1209 status, ret);
1210 else
1211 vchiq_log_trace(vchiq_arm_log_level,
1212 " ioctl instance %lx, cmd %s -> status %d, %ld",
1213 (unsigned long)instance,
1214 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1215 ioctl_names[_IOC_NR(cmd)] :
1216 "<invalid>",
1217 status, ret);
1218
1219 return ret;
1220 }
1221
1222 /****************************************************************************
1223 *
1224 * vchiq_open
1225 *
1226 ***************************************************************************/
1227
1228 static int
1229 vchiq_open(struct inode *inode, struct file *file)
1230 {
1231 int dev = iminor(inode) & 0x0f;
1232 vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1233 switch (dev) {
1234 case VCHIQ_MINOR: {
1235 int ret;
1236 VCHIQ_STATE_T *state = vchiq_get_state();
1237 VCHIQ_INSTANCE_T instance;
1238
1239 if (!state) {
1240 vchiq_log_error(vchiq_arm_log_level,
1241 "vchiq has no connection to VideoCore");
1242 return -ENOTCONN;
1243 }
1244
1245 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1246 if (!instance)
1247 return -ENOMEM;
1248
1249 instance->state = state;
1250 instance->pid = current->tgid;
1251
1252 ret = vchiq_debugfs_add_instance(instance);
1253 if (ret != 0) {
1254 kfree(instance);
1255 return ret;
1256 }
1257
1258 sema_init(&instance->insert_event, 0);
1259 sema_init(&instance->remove_event, 0);
1260 mutex_init(&instance->completion_mutex);
1261 mutex_init(&instance->bulk_waiter_list_mutex);
1262 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1263
1264 file->private_data = instance;
1265 } break;
1266
1267 default:
1268 vchiq_log_error(vchiq_arm_log_level,
1269 "Unknown minor device: %d", dev);
1270 return -ENXIO;
1271 }
1272
1273 return 0;
1274 }
1275
1276 /****************************************************************************
1277 *
1278 * vchiq_release
1279 *
1280 ***************************************************************************/
1281
/* Release (close) handler for the vchiq character device.
 *
 * Tears down everything owned by the per-open instance: stops the
 * completion thread, terminates and drains all services opened through
 * this instance, flushes pending completions, frees bulk waiters, and
 * finally frees the instance itself.  The sequence of wake-ups and waits
 * below is order-sensitive; do not reorder without care.
 */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;
	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		/* No connected VideoCore state - nothing we can tear down. */
		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
				USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) != NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Blocks until the service reaches the FREE state. */
			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Drain any messages still queued for user space,
			 * returning each one to VCHIQ.  The spinlock is
			 * dropped around vchiq_release_message() because it
			 * may sleep. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
					(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
			{
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free any bulk waiters left behind by interrupted
		 * blocking bulk transfers. */
		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}
1419
1420 /****************************************************************************
1421 *
1422 * vchiq_dump
1423 *
1424 ***************************************************************************/
1425
1426 void
1427 vchiq_dump(void *dump_context, const char *str, int len)
1428 {
1429 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1430
1431 if (context->actual < context->space) {
1432 int copy_bytes;
1433 if (context->offset > 0) {
1434 int skip_bytes = min(len, (int)context->offset);
1435 str += skip_bytes;
1436 len -= skip_bytes;
1437 context->offset -= skip_bytes;
1438 if (context->offset > 0)
1439 return;
1440 }
1441 copy_bytes = min(len, (int)(context->space - context->actual));
1442 if (copy_bytes == 0)
1443 return;
1444 if (copy_to_user(context->buf + context->actual, str,
1445 copy_bytes))
1446 context->actual = -EFAULT;
1447 context->actual += copy_bytes;
1448 len -= copy_bytes;
1449
1450 /* If tne terminating NUL is included in the length, then it
1451 ** marks the end of a line and should be replaced with a
1452 ** carriage return. */
1453 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1454 char cr = '\n';
1455 if (copy_to_user(context->buf + context->actual - 1,
1456 &cr, 1))
1457 context->actual = -EFAULT;
1458 }
1459 }
1460 }
1461
1462 /****************************************************************************
1463 *
1464 * vchiq_dump_platform_instance_state
1465 *
1466 ***************************************************************************/
1467
1468 void
1469 vchiq_dump_platform_instances(void *dump_context)
1470 {
1471 VCHIQ_STATE_T *state = vchiq_get_state();
1472 char buf[80];
1473 int len;
1474 int i;
1475
1476 /* There is no list of instances, so instead scan all services,
1477 marking those that have been dumped. */
1478
1479 for (i = 0; i < state->unused_service; i++) {
1480 VCHIQ_SERVICE_T *service = state->services[i];
1481 VCHIQ_INSTANCE_T instance;
1482
1483 if (service && (service->base.callback == service_callback)) {
1484 instance = service->instance;
1485 if (instance)
1486 instance->mark = 0;
1487 }
1488 }
1489
1490 for (i = 0; i < state->unused_service; i++) {
1491 VCHIQ_SERVICE_T *service = state->services[i];
1492 VCHIQ_INSTANCE_T instance;
1493
1494 if (service && (service->base.callback == service_callback)) {
1495 instance = service->instance;
1496 if (instance && !instance->mark) {
1497 len = snprintf(buf, sizeof(buf),
1498 "Instance %pK: pid %d,%s completions %d/%d",
1499 instance, instance->pid,
1500 instance->connected ? " connected, " :
1501 "",
1502 instance->completion_insert -
1503 instance->completion_remove,
1504 MAX_COMPLETIONS);
1505
1506 vchiq_dump(dump_context, buf, len + 1);
1507
1508 instance->mark = 1;
1509 }
1510 }
1511 }
1512 }
1513
1514 /****************************************************************************
1515 *
1516 * vchiq_dump_platform_service_state
1517 *
1518 ***************************************************************************/
1519
1520 void
1521 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1522 {
1523 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1524 char buf[80];
1525 int len;
1526
1527 len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
1528
1529 if ((service->base.callback == service_callback) &&
1530 user_service->is_vchi) {
1531 len += snprintf(buf + len, sizeof(buf) - len,
1532 ", %d/%d messages",
1533 user_service->msg_insert - user_service->msg_remove,
1534 MSG_QUEUE_SIZE);
1535
1536 if (user_service->dequeue_pending)
1537 len += snprintf(buf + len, sizeof(buf) - len,
1538 " (dequeue pending)");
1539 }
1540
1541 vchiq_dump(dump_context, buf, len + 1);
1542 }
1543
1544 /****************************************************************************
1545 *
1546 * dump_user_mem
1547 *
1548 ***************************************************************************/
1549
1550 static void
1551 dump_phys_mem(void *virt_addr, u32 num_bytes)
1552 {
1553 int rc;
1554 u8 *end_virt_addr = virt_addr + num_bytes;
1555 int num_pages;
1556 int offset;
1557 int end_offset;
1558 int page_idx;
1559 int prev_idx;
1560 struct page *page;
1561 struct page **pages;
1562 u8 *kmapped_virt_ptr;
1563
1564 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1565
1566 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1567 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1568 ~0x0fuL);
1569
1570 offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1571 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1572
1573 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1574
1575 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1576 if (pages == NULL) {
1577 vchiq_log_error(vchiq_arm_log_level,
1578 "Unable to allocation memory for %d pages\n",
1579 num_pages);
1580 return;
1581 }
1582
1583 down_read(&current->mm->mmap_sem);
1584 rc = get_user_pages(
1585 (unsigned long)virt_addr, /* start */
1586 num_pages, /* len */
1587 0, /* gup_flags */
1588 pages, /* pages (array of page pointers) */
1589 NULL); /* vmas */
1590 up_read(&current->mm->mmap_sem);
1591
1592 prev_idx = -1;
1593 page = NULL;
1594
1595 if (rc < 0) {
1596 vchiq_log_error(vchiq_arm_log_level,
1597 "Failed to get user pages: %d\n", rc);
1598 goto out;
1599 }
1600
1601 while (offset < end_offset) {
1602
1603 int page_offset = offset % PAGE_SIZE;
1604 page_idx = offset / PAGE_SIZE;
1605
1606 if (page_idx != prev_idx) {
1607
1608 if (page != NULL)
1609 kunmap(page);
1610 page = pages[page_idx];
1611 kmapped_virt_ptr = kmap(page);
1612
1613 prev_idx = page_idx;
1614 }
1615
1616 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1617 vchiq_log_dump_mem("ph",
1618 (u32)(unsigned long)&kmapped_virt_ptr[
1619 page_offset],
1620 &kmapped_virt_ptr[page_offset], 16);
1621
1622 offset += 16;
1623 }
1624
1625 out:
1626 if (page != NULL)
1627 kunmap(page);
1628
1629 for (page_idx = 0; page_idx < num_pages; page_idx++)
1630 put_page(pages[page_idx]);
1631
1632 kfree(pages);
1633 }
1634
1635 /****************************************************************************
1636 *
1637 * vchiq_read
1638 *
1639 ***************************************************************************/
1640
1641 static ssize_t
1642 vchiq_read(struct file *file, char __user *buf,
1643 size_t count, loff_t *ppos)
1644 {
1645 DUMP_CONTEXT_T context;
1646 context.buf = buf;
1647 context.actual = 0;
1648 context.space = count;
1649 context.offset = *ppos;
1650
1651 vchiq_dump_state(&context, &g_state);
1652
1653 *ppos += context.actual;
1654
1655 return context.actual;
1656 }
1657
1658 VCHIQ_STATE_T *
1659 vchiq_get_state(void)
1660 {
1661
1662 if (g_state.remote == NULL)
1663 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1664 else if (g_state.remote->initialised != 1)
1665 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1666 __func__, g_state.remote->initialised);
1667
1668 return ((g_state.remote != NULL) &&
1669 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1670 }
1671
/* File operations for the vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
1680
1681 /*
1682 * Autosuspend related functionality
1683 */
1684
1685 int
1686 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1687 {
1688 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1689 if (!arm_state)
1690 /* autosuspend not supported - always return wanted */
1691 return 1;
1692 else if (arm_state->blocked_count)
1693 return 1;
1694 else if (!arm_state->videocore_use_count)
1695 /* usage count zero - check for override unless we're forcing */
1696 if (arm_state->resume_blocked)
1697 return 0;
1698 else
1699 return vchiq_platform_videocore_wanted(state);
1700 else
1701 /* non-zero usage count - videocore still required */
1702 return 1;
1703 }
1704
1705 static VCHIQ_STATUS_T
1706 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
1707 VCHIQ_HEADER_T *header,
1708 VCHIQ_SERVICE_HANDLE_T service_user,
1709 void *bulk_user)
1710 {
1711 vchiq_log_error(vchiq_susp_log_level,
1712 "%s callback reason %d", __func__, reason);
1713 return 0;
1714 }
1715
/* Kernel thread that mirrors ARM-side use/release requests onto a
 * VideoCore "KEEP" service, keeping VideoCore awake while anything on the
 * ARM side holds a use count.  Woken via arm_state->ka_evt; the pending
 * use/release counts are transferred atomically from ka_use_count /
 * ka_release_count each time round the loop.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback = vchiq_keepalive_vchiq_callback,
		.version = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	/* Open a private VCHIQ connection for the keep-alive service. */
	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
			!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1795
1796
1797
1798 VCHIQ_STATUS_T
1799 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1800 {
1801 if (arm_state) {
1802 rwlock_init(&arm_state->susp_res_lock);
1803
1804 init_completion(&arm_state->ka_evt);
1805 atomic_set(&arm_state->ka_use_count, 0);
1806 atomic_set(&arm_state->ka_use_ack_count, 0);
1807 atomic_set(&arm_state->ka_release_count, 0);
1808
1809 init_completion(&arm_state->vc_suspend_complete);
1810
1811 init_completion(&arm_state->vc_resume_complete);
1812 /* Initialise to 'done' state. We only want to block on resume
1813 * completion while videocore is suspended. */
1814 set_resume_state(arm_state, VC_RESUME_RESUMED);
1815
1816 init_completion(&arm_state->resume_blocker);
1817 /* Initialise to 'done' state. We only want to block on this
1818 * completion while resume is blocked */
1819 complete_all(&arm_state->resume_blocker);
1820
1821 init_completion(&arm_state->blocked_blocker);
1822 /* Initialise to 'done' state. We only want to block on this
1823 * completion while things are waiting on the resume blocker */
1824 complete_all(&arm_state->blocked_blocker);
1825
1826 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
1827 arm_state->suspend_timer_running = 0;
1828 setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
1829 (unsigned long)(state));
1830
1831 arm_state->first_connect = 0;
1832
1833 }
1834 return VCHIQ_SUCCESS;
1835 }
1836
1837 /*
1838 ** Functions to modify the state variables;
1839 ** set_suspend_state
1840 ** set_resume_state
1841 **
1842 ** There are more state variables than we might like, so ensure they remain in
1843 ** step. Suspend and resume state are maintained separately, since most of
1844 ** these state machines can operate independently. However, there are a few
1845 ** states where state transitions in one state machine cause a reset to the
1846 ** other state machine. In addition, there are some completion events which
1847 ** need to occur on state machine reset and end-state(s), so these are also
1848 ** dealt with in these functions.
1849 **
1850 ** In all states we set the state variable according to the input, but in some
1851 ** cases we perform additional steps outlined below;
1852 **
1853 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1854 ** The suspend completion is completed after any suspend
1855 ** attempt. When we reset the state machine we also reset
1856 ** the completion. This reset occurs when videocore is
1857 ** resumed, and also if we initiate suspend after a suspend
1858 ** failure.
1859 **
1860 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1861 ** suspend - ie from this point on we must try to suspend
1862 ** before resuming can occur. We therefore also reset the
1863 ** resume state machine to VC_RESUME_IDLE in this state.
1864 **
1865 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1866 ** complete_all on the suspend completion to notify
1867 ** anything waiting for suspend to happen.
1868 **
1869 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1870 ** initiate resume, so no need to alter resume state.
1871 ** We call complete_all on the suspend completion to notify
1872 ** of suspend rejection.
1873 **
1874 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1875 ** suspend completion and reset the resume state machine.
1876 **
1877 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
** resume completion is in its 'done' state whenever
** videocore is running. Therefore, the VC_RESUME_IDLE
1880 ** state implies that videocore is suspended.
1881 ** Hence, any thread which needs to wait until videocore is
1882 ** running can wait on this completion - it will only block
1883 ** if videocore is suspended.
1884 **
1885 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
1886 ** Call complete_all on the resume completion to unblock
1887 ** any threads waiting for resume. Also reset the suspend
** state machine to its idle state.
1889 **
1890 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1891 */
1892
1893 void
1894 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1895 enum vc_suspend_status new_state)
1896 {
1897 /* set the state in all cases */
1898 arm_state->vc_suspend_state = new_state;
1899
1900 /* state specific additional actions */
1901 switch (new_state) {
1902 case VC_SUSPEND_FORCE_CANCELED:
1903 complete_all(&arm_state->vc_suspend_complete);
1904 break;
1905 case VC_SUSPEND_REJECTED:
1906 complete_all(&arm_state->vc_suspend_complete);
1907 break;
1908 case VC_SUSPEND_FAILED:
1909 complete_all(&arm_state->vc_suspend_complete);
1910 arm_state->vc_resume_state = VC_RESUME_RESUMED;
1911 complete_all(&arm_state->vc_resume_complete);
1912 break;
1913 case VC_SUSPEND_IDLE:
1914 reinit_completion(&arm_state->vc_suspend_complete);
1915 break;
1916 case VC_SUSPEND_REQUESTED:
1917 break;
1918 case VC_SUSPEND_IN_PROGRESS:
1919 set_resume_state(arm_state, VC_RESUME_IDLE);
1920 break;
1921 case VC_SUSPEND_SUSPENDED:
1922 complete_all(&arm_state->vc_suspend_complete);
1923 break;
1924 default:
1925 BUG();
1926 break;
1927 }
1928 }
1929
1930 void
1931 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1932 enum vc_resume_status new_state)
1933 {
1934 /* set the state in all cases */
1935 arm_state->vc_resume_state = new_state;
1936
1937 /* state specific additional actions */
1938 switch (new_state) {
1939 case VC_RESUME_FAILED:
1940 break;
1941 case VC_RESUME_IDLE:
1942 reinit_completion(&arm_state->vc_resume_complete);
1943 break;
1944 case VC_RESUME_REQUESTED:
1945 break;
1946 case VC_RESUME_IN_PROGRESS:
1947 break;
1948 case VC_RESUME_RESUMED:
1949 complete_all(&arm_state->vc_resume_complete);
1950 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1951 break;
1952 default:
1953 BUG();
1954 break;
1955 }
1956 }
1957
1958
1959 /* should be called with the write lock held */
1960 inline void
1961 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1962 {
1963 del_timer(&arm_state->suspend_timer);
1964 arm_state->suspend_timer.expires = jiffies +
1965 msecs_to_jiffies(arm_state->
1966 suspend_timer_timeout);
1967 add_timer(&arm_state->suspend_timer);
1968 arm_state->suspend_timer_running = 1;
1969 }
1970
1971 /* should be called with the write lock held */
1972 static inline void
1973 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1974 {
1975 if (arm_state->suspend_timer_running) {
1976 del_timer(&arm_state->suspend_timer);
1977 arm_state->suspend_timer_running = 0;
1978 }
1979 }
1980
1981 static inline int
1982 need_resume(VCHIQ_STATE_T *state)
1983 {
1984 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1985 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1986 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1987 vchiq_videocore_wanted(state);
1988 }
1989
/* Block further resumes in preparation for a forced suspend.
 *
 * Called with arm_state->susp_res_lock held for writing; the lock is
 * temporarily dropped around each interruptible wait and re-taken before
 * returning (on every path), so the caller's locking invariant holds.
 * Waits first for any clients blocked by a previous force suspend, then
 * for an in-flight resume to finish, before setting resume_blocked.
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR if a wait timed out or was
 * interrupted.  (Note: the VCHIQ_* status is carried in an int.)
 */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		reinit_completion(&arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed", __func__);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume", __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}

	/* Resume is quiescent - now block further resumes. */
	reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
2055
/* Re-enable resume after a forced suspend, releasing any threads waiting
 * on the resume blocker.
 * NOTE(review): the completion is signalled before resume_blocked is
 * cleared; preserve this ordering unless confirmed safe to change. */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
2062
/* Initiate suspend via slot handler. Should be called with the write lock
 * held */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	/* No ARM state - autosuspend unsupported; report VCHIQ_ERROR. */
	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2114
2115 void
2116 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2117 {
2118 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2119 int susp = 0;
2120
2121 if (!arm_state)
2122 goto out;
2123
2124 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2125
2126 write_lock_bh(&arm_state->susp_res_lock);
2127 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2128 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2129 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2130 susp = 1;
2131 }
2132 write_unlock_bh(&arm_state->susp_res_lock);
2133
2134 if (susp)
2135 vchiq_platform_suspend(state);
2136
2137 out:
2138 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2139 return;
2140 }
2141
2142
2143 static void
2144 output_timeout_error(VCHIQ_STATE_T *state)
2145 {
2146 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2147 char err[50] = "";
2148 int vc_use_count = arm_state->videocore_use_count;
2149 int active_services = state->unused_service;
2150 int i;
2151
2152 if (!arm_state->videocore_use_count) {
2153 snprintf(err, sizeof(err), " Videocore usecount is 0");
2154 goto output_msg;
2155 }
2156 for (i = 0; i < active_services; i++) {
2157 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2158 if (service_ptr && service_ptr->service_use_count &&
2159 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2160 snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2161 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2162 service_ptr->base.fourcc),
2163 service_ptr->client_id,
2164 service_ptr->service_use_count,
2165 service_ptr->service_use_count ==
2166 vc_use_count ? "" : " (+ more)");
2167 break;
2168 }
2169 }
2170
2171 output_msg:
2172 vchiq_log_error(vchiq_susp_log_level,
2173 "timed out waiting for vc suspend (%d).%s",
2174 arm_state->autosuspend_override, err);
2175
2176 }
2177
2178 /* Try to get videocore into suspended state, regardless of autosuspend state.
2179 ** We don't actually force suspend, since videocore may get into a bad state
2180 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2181 ** determine a good point to suspend. If this doesn't happen within 100ms we
2182 ** report failure.
2183 **
2184 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2185 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2186 */
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;	/* -1 => one extra timeout permitted below */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Prevent concurrent resumes while we try to get the VC down. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			/* Give autosuspend a chance to reach idle on its
			 * own; the wait loop below times this out. */
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* Repeated failures - suspend regardless. */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* Drop the lock while sleeping on vc_suspend_complete,
		 * re-take it before inspecting the shared state. */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			/* Timed out: record the failure and log what is
			 * still blocking suspend, then ask caller to retry. */
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2297
2298 void
2299 vchiq_check_suspend(VCHIQ_STATE_T *state)
2300 {
2301 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2302
2303 if (!arm_state)
2304 goto out;
2305
2306 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2307
2308 write_lock_bh(&arm_state->susp_res_lock);
2309 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2310 arm_state->first_connect &&
2311 !vchiq_videocore_wanted(state)) {
2312 vchiq_arm_vcsuspend(state);
2313 }
2314 write_unlock_bh(&arm_state->susp_res_lock);
2315
2316 out:
2317 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2318 return;
2319 }
2320
2321
/* Undo a previous block_resume() (e.g. after vchiq_arm_force_suspend)
 * and, if need_resume() indicates the VideoCore is wanted, request a
 * resume and wait for it to complete.
 * Returns 0 if the VideoCore ends up resumed, -1 if it remains
 * suspended or the wait was interrupted. */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	/* vchiq_check_resume requires the write lock to be held. */
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	/* Report the final state; only a completed resume counts as 0. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2364
2365 /* This function should be called with the write lock held */
2366 int
2367 vchiq_check_resume(VCHIQ_STATE_T *state)
2368 {
2369 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2370 int resume = 0;
2371
2372 if (!arm_state)
2373 goto out;
2374
2375 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2376
2377 if (need_resume(state)) {
2378 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2379 request_poll(state, NULL, 0);
2380 resume = 1;
2381 }
2382
2383 out:
2384 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2385 return resume;
2386 }
2387
2388 VCHIQ_STATUS_T
2389 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2390 enum USE_TYPE_E use_type)
2391 {
2392 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2393 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2394 char entity[16];
2395 int *entity_uc;
2396 int local_uc, local_entity_uc;
2397
2398 if (!arm_state)
2399 goto out;
2400
2401 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2402
2403 if (use_type == USE_TYPE_VCHIQ) {
2404 sprintf(entity, "VCHIQ: ");
2405 entity_uc = &arm_state->peer_use_count;
2406 } else if (service) {
2407 sprintf(entity, "%c%c%c%c:%03d",
2408 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2409 service->client_id);
2410 entity_uc = &service->service_use_count;
2411 } else {
2412 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2413 "ptr", __func__);
2414 ret = VCHIQ_ERROR;
2415 goto out;
2416 }
2417
2418 write_lock_bh(&arm_state->susp_res_lock);
2419 while (arm_state->resume_blocked) {
2420 /* If we call 'use' while force suspend is waiting for suspend,
2421 * then we're about to block the thread which the force is
2422 * waiting to complete, so we're bound to just time out. In this
2423 * case, set the suspend state such that the wait will be
2424 * canceled, so we can complete as quickly as possible. */
2425 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2426 VC_SUSPEND_IDLE) {
2427 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2428 break;
2429 }
2430 /* If suspend is already in progress then we need to block */
2431 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2432 /* Indicate that there are threads waiting on the resume
2433 * blocker. These need to be allowed to complete before
2434 * a _second_ call to force suspend can complete,
2435 * otherwise low priority threads might never actually
2436 * continue */
2437 arm_state->blocked_count++;
2438 write_unlock_bh(&arm_state->susp_res_lock);
2439 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2440 "blocked - waiting...", __func__, entity);
2441 if (wait_for_completion_killable(
2442 &arm_state->resume_blocker) != 0) {
2443 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2444 "wait for resume blocker interrupted",
2445 __func__, entity);
2446 ret = VCHIQ_ERROR;
2447 write_lock_bh(&arm_state->susp_res_lock);
2448 arm_state->blocked_count--;
2449 write_unlock_bh(&arm_state->susp_res_lock);
2450 goto out;
2451 }
2452 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2453 "unblocked", __func__, entity);
2454 write_lock_bh(&arm_state->susp_res_lock);
2455 if (--arm_state->blocked_count == 0)
2456 complete_all(&arm_state->blocked_blocker);
2457 }
2458 }
2459
2460 stop_suspend_timer(arm_state);
2461
2462 local_uc = ++arm_state->videocore_use_count;
2463 local_entity_uc = ++(*entity_uc);
2464
2465 /* If there's a pending request which hasn't yet been serviced then
2466 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2467 * vc_resume_complete will block until we either resume or fail to
2468 * suspend */
2469 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2470 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2471
2472 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2473 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2474 vchiq_log_info(vchiq_susp_log_level,
2475 "%s %s count %d, state count %d",
2476 __func__, entity, local_entity_uc, local_uc);
2477 request_poll(state, NULL, 0);
2478 } else
2479 vchiq_log_trace(vchiq_susp_log_level,
2480 "%s %s count %d, state count %d",
2481 __func__, entity, *entity_uc, local_uc);
2482
2483
2484 write_unlock_bh(&arm_state->susp_res_lock);
2485
2486 /* Completion is in a done state when we're not suspended, so this won't
2487 * block for the non-suspended case. */
2488 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2489 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2490 __func__, entity);
2491 if (wait_for_completion_killable(
2492 &arm_state->vc_resume_complete) != 0) {
2493 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2494 "resume interrupted", __func__, entity);
2495 ret = VCHIQ_ERROR;
2496 goto out;
2497 }
2498 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2499 entity);
2500 }
2501
2502 if (ret == VCHIQ_SUCCESS) {
2503 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2504 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2505 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2506 /* Send the use notify to videocore */
2507 status = vchiq_send_remote_use_active(state);
2508 if (status == VCHIQ_SUCCESS)
2509 ack_cnt--;
2510 else
2511 atomic_add(ack_cnt,
2512 &arm_state->ka_use_ack_count);
2513 }
2514 }
2515
2516 out:
2517 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2518 return ret;
2519 }
2520
2521 VCHIQ_STATUS_T
2522 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2523 {
2524 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2525 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2526 char entity[16];
2527 int *entity_uc;
2528 int local_uc, local_entity_uc;
2529
2530 if (!arm_state)
2531 goto out;
2532
2533 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2534
2535 if (service) {
2536 sprintf(entity, "%c%c%c%c:%03d",
2537 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2538 service->client_id);
2539 entity_uc = &service->service_use_count;
2540 } else {
2541 sprintf(entity, "PEER: ");
2542 entity_uc = &arm_state->peer_use_count;
2543 }
2544
2545 write_lock_bh(&arm_state->susp_res_lock);
2546 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2547 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2548 WARN_ON(!arm_state->videocore_use_count);
2549 WARN_ON(!(*entity_uc));
2550 ret = VCHIQ_ERROR;
2551 goto unlock;
2552 }
2553 local_uc = --arm_state->videocore_use_count;
2554 local_entity_uc = --(*entity_uc);
2555
2556 if (!vchiq_videocore_wanted(state)) {
2557 if (vchiq_platform_use_suspend_timer() &&
2558 !arm_state->resume_blocked) {
2559 /* Only use the timer if we're not trying to force
2560 * suspend (=> resume_blocked) */
2561 start_suspend_timer(arm_state);
2562 } else {
2563 vchiq_log_info(vchiq_susp_log_level,
2564 "%s %s count %d, state count %d - suspending",
2565 __func__, entity, *entity_uc,
2566 arm_state->videocore_use_count);
2567 vchiq_arm_vcsuspend(state);
2568 }
2569 } else
2570 vchiq_log_trace(vchiq_susp_log_level,
2571 "%s %s count %d, state count %d",
2572 __func__, entity, *entity_uc,
2573 arm_state->videocore_use_count);
2574
2575 unlock:
2576 write_unlock_bh(&arm_state->susp_res_lock);
2577
2578 out:
2579 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2580 return ret;
2581 }
2582
/* Called when the remote side (VideoCore) reports a "use": bank the
 * request in ka_use_count and wake the keepalive thread via ka_evt.
 * The counter is incremented before the completion is signalled so
 * the woken thread observes the new count. */
void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
2591
/* Called when the remote side (VideoCore) reports a "release": bank
 * it in ka_release_count and wake the keepalive thread via ka_evt.
 * Increment-before-complete ordering mirrors vchiq_on_remote_use. */
void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2600
/* Take a use count for a service (USE_TYPE_SERVICE); thin wrapper
 * around vchiq_use_internal(). */
VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2606
/* Drop a service's use count; thin wrapper around
 * vchiq_release_internal(). */
VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_release_internal(service->state, service);
}
2612
/* Return the address of the debugfs node embedded in the instance. */
VCHIQ_DEBUGFS_NODE_T *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
	return &instance->debugfs_node;
}
2618
2619 int
2620 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2621 {
2622 VCHIQ_SERVICE_T *service;
2623 int use_count = 0, i;
2624 i = 0;
2625 while ((service = next_service_by_instance(instance->state,
2626 instance, &i)) != NULL) {
2627 use_count += service->service_use_count;
2628 unlock_service(service);
2629 }
2630 return use_count;
2631 }
2632
/* Return the pid recorded in the instance. */
int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
	return instance->pid;
}
2638
/* Return the instance's trace flag. */
int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
	return instance->trace;
}
2644
2645 void
2646 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2647 {
2648 VCHIQ_SERVICE_T *service;
2649 int i;
2650 i = 0;
2651 while ((service = next_service_by_instance(instance->state,
2652 instance, &i)) != NULL) {
2653 service->trace = trace;
2654 unlock_service(service);
2655 }
2656 instance->trace = (trace != 0);
2657 }
2658
2659 static void suspend_timer_callback(unsigned long context)
2660 {
2661 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2662 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2663 if (!arm_state)
2664 goto out;
2665 vchiq_log_info(vchiq_susp_log_level,
2666 "%s - suspend timer expired - check suspend", __func__);
2667 vchiq_check_suspend(state);
2668 out:
2669 return;
2670 }
2671
2672 VCHIQ_STATUS_T
2673 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2674 {
2675 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2676 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2677 if (service) {
2678 ret = vchiq_use_internal(service->state, service,
2679 USE_TYPE_SERVICE_NO_RESUME);
2680 unlock_service(service);
2681 }
2682 return ret;
2683 }
2684
2685 VCHIQ_STATUS_T
2686 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2687 {
2688 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2689 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2690 if (service) {
2691 ret = vchiq_use_internal(service->state, service,
2692 USE_TYPE_SERVICE);
2693 unlock_service(service);
2694 }
2695 return ret;
2696 }
2697
2698 VCHIQ_STATUS_T
2699 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2700 {
2701 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2702 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2703 if (service) {
2704 ret = vchiq_release_internal(service->state, service);
2705 unlock_service(service);
2706 }
2707 return ret;
2708 }
2709
2710 void
2711 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2712 {
2713 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2714 int i, j = 0;
2715 /* Only dump 64 services */
2716 static const int local_max_services = 64;
2717 /* If there's more than 64 services, only dump ones with
2718 * non-zero counts */
2719 int only_nonzero = 0;
2720 static const char *nz = "<-- preventing suspend";
2721
2722 enum vc_suspend_status vc_suspend_state;
2723 enum vc_resume_status vc_resume_state;
2724 int peer_count;
2725 int vc_use_count;
2726 int active_services;
2727 struct service_data_struct {
2728 int fourcc;
2729 int clientid;
2730 int use_count;
2731 } service_data[local_max_services];
2732
2733 if (!arm_state)
2734 return;
2735
2736 read_lock_bh(&arm_state->susp_res_lock);
2737 vc_suspend_state = arm_state->vc_suspend_state;
2738 vc_resume_state = arm_state->vc_resume_state;
2739 peer_count = arm_state->peer_use_count;
2740 vc_use_count = arm_state->videocore_use_count;
2741 active_services = state->unused_service;
2742 if (active_services > local_max_services)
2743 only_nonzero = 1;
2744
2745 for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2746 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2747 if (!service_ptr)
2748 continue;
2749
2750 if (only_nonzero && !service_ptr->service_use_count)
2751 continue;
2752
2753 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2754 service_data[j].fourcc = service_ptr->base.fourcc;
2755 service_data[j].clientid = service_ptr->client_id;
2756 service_data[j++].use_count = service_ptr->
2757 service_use_count;
2758 }
2759 }
2760
2761 read_unlock_bh(&arm_state->susp_res_lock);
2762
2763 vchiq_log_warning(vchiq_susp_log_level,
2764 "-- Videcore suspend state: %s --",
2765 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2766 vchiq_log_warning(vchiq_susp_log_level,
2767 "-- Videcore resume state: %s --",
2768 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2769
2770 if (only_nonzero)
2771 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2772 "services (%d). Only dumping up to first %d services "
2773 "with non-zero use-count", active_services,
2774 local_max_services);
2775
2776 for (i = 0; i < j; i++) {
2777 vchiq_log_warning(vchiq_susp_log_level,
2778 "----- %c%c%c%c:%d service count %d %s",
2779 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2780 service_data[i].clientid,
2781 service_data[i].use_count,
2782 service_data[i].use_count ? nz : "");
2783 }
2784 vchiq_log_warning(vchiq_susp_log_level,
2785 "----- VCHIQ use count count %d", peer_count);
2786 vchiq_log_warning(vchiq_susp_log_level,
2787 "--- Overall vchiq instance use count %d", vc_use_count);
2788
2789 vchiq_dump_platform_use_state(state);
2790 }
2791
2792 VCHIQ_STATUS_T
2793 vchiq_check_service(VCHIQ_SERVICE_T *service)
2794 {
2795 VCHIQ_ARM_STATE_T *arm_state;
2796 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2797
2798 if (!service || !service->state)
2799 goto out;
2800
2801 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2802
2803 arm_state = vchiq_platform_get_arm_state(service->state);
2804
2805 read_lock_bh(&arm_state->susp_res_lock);
2806 if (service->service_use_count)
2807 ret = VCHIQ_SUCCESS;
2808 read_unlock_bh(&arm_state->susp_res_lock);
2809
2810 if (ret == VCHIQ_ERROR) {
2811 vchiq_log_error(vchiq_susp_log_level,
2812 "%s ERROR - %c%c%c%c:%d service count %d, "
2813 "state count %d, videocore suspend state %s", __func__,
2814 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2815 service->client_id, service->service_use_count,
2816 arm_state->videocore_use_count,
2817 suspend_state_names[arm_state->vc_suspend_state +
2818 VC_SUSPEND_NUM_OFFSET]);
2819 vchiq_dump_service_use_state(service->state);
2820 }
2821 out:
2822 return ret;
2823 }
2824
2825 /* stub functions */
/* Stub: no ARM-side action is taken for a remote "use active"
 * notification on this platform. */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	(void)state;	/* unused */
}
2830
/* Platform hook invoked on VCHIQ connection-state transitions.
 * On the first transition to CONNECTED, create and start the
 * keepalive thread ("vchiq-keep/<state id>") exactly once:
 * first_connect is tested and set under susp_res_lock, and the lock
 * is dropped before kthread_create (NOTE(review): presumably because
 * thread creation can sleep - confirm). */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[16];
			arm_state->first_connect = 1;
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
				state->id);
			arm_state->ka_thread = kthread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (IS_ERR(arm_state->ka_thread)) {
				/* Non-fatal for the connection itself;
				 * keepalive simply won't run. */
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}
2860
2861 static int vchiq_probe(struct platform_device *pdev)
2862 {
2863 struct device_node *fw_node;
2864 struct rpi_firmware *fw;
2865 int err;
2866 void *ptr_err;
2867
2868 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
2869 if (!fw_node) {
2870 dev_err(&pdev->dev, "Missing firmware node\n");
2871 return -ENOENT;
2872 }
2873
2874 fw = rpi_firmware_get(fw_node);
2875 of_node_put(fw_node);
2876 if (!fw)
2877 return -EPROBE_DEFER;
2878
2879 platform_set_drvdata(pdev, fw);
2880
2881 err = vchiq_platform_init(pdev, &g_state);
2882 if (err != 0)
2883 goto failed_platform_init;
2884
2885 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2886 if (err != 0) {
2887 vchiq_log_error(vchiq_arm_log_level,
2888 "Unable to allocate device number");
2889 goto failed_platform_init;
2890 }
2891 cdev_init(&vchiq_cdev, &vchiq_fops);
2892 vchiq_cdev.owner = THIS_MODULE;
2893 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2894 if (err != 0) {
2895 vchiq_log_error(vchiq_arm_log_level,
2896 "Unable to register device");
2897 goto failed_cdev_add;
2898 }
2899
2900 /* create sysfs entries */
2901 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2902 ptr_err = vchiq_class;
2903 if (IS_ERR(ptr_err))
2904 goto failed_class_create;
2905
2906 vchiq_dev = device_create(vchiq_class, NULL,
2907 vchiq_devid, NULL, "vchiq");
2908 ptr_err = vchiq_dev;
2909 if (IS_ERR(ptr_err))
2910 goto failed_device_create;
2911
2912 /* create debugfs entries */
2913 err = vchiq_debugfs_init();
2914 if (err != 0)
2915 goto failed_debugfs_init;
2916
2917 vchiq_log_info(vchiq_arm_log_level,
2918 "vchiq: initialised - version %d (min %d), device %d.%d",
2919 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2920 MAJOR(vchiq_devid), MINOR(vchiq_devid));
2921
2922 return 0;
2923
2924 failed_debugfs_init:
2925 device_destroy(vchiq_class, vchiq_devid);
2926 failed_device_create:
2927 class_destroy(vchiq_class);
2928 failed_class_create:
2929 cdev_del(&vchiq_cdev);
2930 err = PTR_ERR(ptr_err);
2931 failed_cdev_add:
2932 unregister_chrdev_region(vchiq_devid, 1);
2933 failed_platform_init:
2934 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2935 return err;
2936 }
2937
/* Platform-driver remove: tear down in reverse order of vchiq_probe
 * (debugfs, device node, class, cdev, device number). Always
 * returns 0. */
static int vchiq_remove(struct platform_device *pdev)
{
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	class_destroy(vchiq_class);
	cdev_del(&vchiq_cdev);
	unregister_chrdev_region(vchiq_devid, 1);

	return 0;
}
2948
/* Devicetree match table: binds this driver to "brcm,bcm2835-vchiq"
 * nodes; exported for module autoloading. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2954
/* Platform-driver registration: probe/remove plus DT matching; the
 * module_platform_driver() macro generates module init/exit. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
module_platform_driver(vchiq_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");