]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
UBUNTU: Ubuntu-4.10.0-37.41
[mirror_ubuntu-zesty-kernel.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_arm.c
1 /**
2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/cdev.h>
40 #include <linux/fs.h>
41 #include <linux/device.h>
42 #include <linux/mm.h>
43 #include <linux/highmem.h>
44 #include <linux/pagemap.h>
45 #include <linux/bug.h>
46 #include <linux/semaphore.h>
47 #include <linux/list.h>
48 #include <linux/of.h>
49 #include <linux/platform_device.h>
50 #include <soc/bcm2835/raspberrypi-firmware.h>
51
52 #include "vchiq_core.h"
53 #include "vchiq_ioctl.h"
54 #include "vchiq_arm.h"
55 #include "vchiq_debugfs.h"
56 #include "vchiq_killable.h"
57
#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define VCHIQ_MINOR 0

/* Some per-instance constants.
 * MAX_COMPLETIONS and MSG_QUEUE_SIZE must be powers of two: the queue
 * indices are free-running counters masked with (SIZE - 1) on access.
 */
#define MAX_COMPLETIONS 16
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 64

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200


/* Defined later in this file (suspend logic); declared here for the timer. */
static void suspend_timer_callback(unsigned long context);
113
/* Per-service state for a service created through the char device.
 * Stored as the vchiq service's base.userdata and freed by
 * user_service_free() when the service is destroyed.
 */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* the underlying vchiq service */
	void *userdata;			/* opaque userdata supplied by the client */
	VCHIQ_INSTANCE_T instance;	/* owning /dev/vchiq instance */
	char is_vchi;			/* non-zero: messages buffered in msg_queue */
	char dequeue_pending;		/* a thread is waiting in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED completion queued, not yet delivered */
	int message_available_pos;	/* completion_insert position of the last
					   MESSAGE_AVAILABLE completion queued */
	int msg_insert;			/* free-running insert index (masked on use) */
	int msg_remove;			/* free-running remove index (masked on use) */
	struct semaphore insert_event;	/* signalled when a message is inserted */
	struct semaphore remove_event;	/* signalled when a message is removed */
	struct semaphore close_event;	/* signalled by CLOSE_DELIVERED */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];	/* pending message ring */
} USER_SERVICE_T;
129
/* Records a blocking bulk transfer that returned VCHIQ_RETRY, so that a
 * later VCHIQ_BULK_MODE_WAITING call from the same pid can reclaim it. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid that issued the blocking transfer */
	struct list_head list;		/* entry in instance->bulk_waiter_list */
};
135
/* Per-open-file state for /dev/vchiq (one per VCHIQ_INSTANCE_T). */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;
	/* Completion ring: indices are free-running counters masked with
	 * (MAX_COMPLETIONS - 1) on access. */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when one is consumed */
	struct mutex completion_mutex;	/* serialises AWAIT_COMPLETION readers */

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* instance is shutting down */
	int pid;
	int mark;
	int use_close_delivered;	/* client honours CLOSE_DELIVERED */
	int trace;

	struct list_head bulk_waiter_list;	/* outstanding blocking bulks */
	struct mutex bulk_waiter_list_mutex;	/* protects the list above */

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};
157
/* Accumulator used when dumping driver state to a user-space buffer.
 * NOTE(review): the dump routines are outside this chunk — field
 * semantics below inferred from names; confirm against vchiq_dump(). */
typedef struct dump_context_struct {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* presumably bytes produced so far */
	size_t space;		/* presumably space remaining in buf */
	loff_t offset;		/* read offset into the dump */
} DUMP_CONTEXT_T;
164
/* Char-device plumbing and global driver state. */
static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static VCHIQ_STATE_T g_state;
static struct class *vchiq_class;
static struct device *vchiq_dev;
/* Protects every USER_SERVICE_T's msg_queue insert/remove state. */
static DEFINE_SPINLOCK(msg_queue_spinlock);

/* Human-readable ioctl names, indexed by _IOC_NR(cmd); used for tracing. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* Keep the name table in step with the ioctl numbering. */
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
	(VCHIQ_IOC_MAX + 1));

static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
198
/****************************************************************************
*
* add_completion
*
***************************************************************************/

/* Append a completion record to the instance's completion ring for
 * delivery via the AWAIT_COMPLETION ioctl.  Called from service_callback
 * context; may sleep (interruptibly) when the ring is full, waiting for
 * the reader to consume an entry.
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted by a signal, or
 * VCHIQ_ERROR if the instance is closing.
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	DEBUG_INITIALISE(g_state.local)

	/* Free-running counters: the ring is full when insert leads remove
	 * by exactly MAX_COMPLETIONS. */
	while (instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS)) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_ERROR;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion =
		&instance->completions[instance->completion_insert &
		(MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	/* Remember where the MESSAGE_AVAILABLE sits so service_callback can
	 * decide whether the completion queue can be bypassed. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos =
			instance->completion_insert;
	instance->completion_insert++;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
263
/****************************************************************************
*
* service_callback
*
***************************************************************************/

/* Callback invoked by the vchiq core for every event on a service owned
 * by a char-device client.  For vchi-style services a data message is
 * buffered in the per-service msg_queue (sleeping if it is full), and the
 * completion queue is bypassed when a reader is already waiting or a
 * MESSAGE_AVAILABLE completion is still outstanding; everything else is
 * forwarded to the instance's completion queue via add_completion().
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted by a signal, or
 * VCHIQ_ERROR if the instance is closing.
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Events arriving while the instance is shutting down are dropped. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Queue full: drop the lock, (maybe) notify the client, and
		 * sleep until the reader makes space.  The lock is re-taken
		 * before the condition is re-checked. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;
		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			user_service->dequeue_pending = 0;
			return VCHIQ_SUCCESS;
		}

		/* The header is now owned by msg_queue; queue the completion
		 * without it. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
371
/****************************************************************************
*
* user_service_free
*
***************************************************************************/
/* Service userdata destructor passed to vchiq_add_service_internal();
 * frees the USER_SERVICE_T allocated in VCHIQ_IOC_CREATE_SERVICE. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
382
383 /****************************************************************************
384 *
385 * close_delivered
386 *
387 ***************************************************************************/
388 static void close_delivered(USER_SERVICE_T *user_service)
389 {
390 vchiq_log_info(vchiq_arm_log_level,
391 "close_delivered(handle=%x)",
392 user_service->service->handle);
393
394 if (user_service->close_pending) {
395 /* Allow the underlying service to be culled */
396 unlock_service(user_service->service);
397
398 /* Wake the user-thread blocked in close_ or remove_service */
399 up(&user_service->close_event);
400
401 user_service->close_pending = 0;
402 }
403 }
404
/* Cursor state threaded through vchiq_ioc_copy_element_data() while a
 * multi-element user message is copied into a message slot. */
struct vchiq_io_copy_callback_context {
	VCHIQ_ELEMENT_T *current_element;	/* element being copied */
	size_t current_element_offset;		/* bytes consumed from it */
	unsigned long elements_to_go;		/* elements left, incl. current */
	size_t current_offset;			/* total bytes copied so far */
};
411
412 static ssize_t
413 vchiq_ioc_copy_element_data(
414 void *context,
415 void *dest,
416 size_t offset,
417 size_t maxsize)
418 {
419 long res;
420 size_t bytes_this_round;
421 struct vchiq_io_copy_callback_context *copy_context =
422 (struct vchiq_io_copy_callback_context *)context;
423
424 if (offset != copy_context->current_offset)
425 return 0;
426
427 if (!copy_context->elements_to_go)
428 return 0;
429
430 /*
431 * Complex logic here to handle the case of 0 size elements
432 * in the middle of the array of elements.
433 *
434 * Need to skip over these 0 size elements.
435 */
436 while (1) {
437 bytes_this_round = min(copy_context->current_element->size -
438 copy_context->current_element_offset,
439 maxsize);
440
441 if (bytes_this_round)
442 break;
443
444 copy_context->elements_to_go--;
445 copy_context->current_element++;
446 copy_context->current_element_offset = 0;
447
448 if (!copy_context->elements_to_go)
449 return 0;
450 }
451
452 res = copy_from_user(dest,
453 copy_context->current_element->data +
454 copy_context->current_element_offset,
455 bytes_this_round);
456
457 if (res != 0)
458 return -EFAULT;
459
460 copy_context->current_element_offset += bytes_this_round;
461 copy_context->current_offset += bytes_this_round;
462
463 /*
464 * Check if done with current element, and if so advance to the next.
465 */
466 if (copy_context->current_element_offset ==
467 copy_context->current_element->size) {
468 copy_context->elements_to_go--;
469 copy_context->current_element++;
470 copy_context->current_element_offset = 0;
471 }
472
473 return bytes_this_round;
474 }
475
476 /**************************************************************************
477 *
478 * vchiq_ioc_queue_message
479 *
480 **************************************************************************/
481 static VCHIQ_STATUS_T
482 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
483 VCHIQ_ELEMENT_T *elements,
484 unsigned long count)
485 {
486 struct vchiq_io_copy_callback_context context;
487 unsigned long i;
488 size_t total_size = 0;
489
490 context.current_element = elements;
491 context.current_element_offset = 0;
492 context.elements_to_go = count;
493 context.current_offset = 0;
494
495 for (i = 0; i < count; i++) {
496 if (!elements[i].data && elements[i].size != 0)
497 return -EFAULT;
498
499 total_size += elements[i].size;
500 }
501
502 return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
503 &context, total_size);
504 }
505
506 /****************************************************************************
507 *
508 * vchiq_ioctl
509 *
510 ***************************************************************************/
511 static long
512 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
513 {
514 VCHIQ_INSTANCE_T instance = file->private_data;
515 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
516 VCHIQ_SERVICE_T *service = NULL;
517 long ret = 0;
518 int i, rc;
519 DEBUG_INITIALISE(g_state.local)
520
521 vchiq_log_trace(vchiq_arm_log_level,
522 "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
523 instance,
524 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
525 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
526 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
527
528 switch (cmd) {
529 case VCHIQ_IOC_SHUTDOWN:
530 if (!instance->connected)
531 break;
532
533 /* Remove all services */
534 i = 0;
535 while ((service = next_service_by_instance(instance->state,
536 instance, &i)) != NULL) {
537 status = vchiq_remove_service(service->handle);
538 unlock_service(service);
539 if (status != VCHIQ_SUCCESS)
540 break;
541 }
542 service = NULL;
543
544 if (status == VCHIQ_SUCCESS) {
545 /* Wake the completion thread and ask it to exit */
546 instance->closing = 1;
547 up(&instance->insert_event);
548 }
549
550 break;
551
552 case VCHIQ_IOC_CONNECT:
553 if (instance->connected) {
554 ret = -EINVAL;
555 break;
556 }
557 rc = mutex_lock_killable(&instance->state->mutex);
558 if (rc != 0) {
559 vchiq_log_error(vchiq_arm_log_level,
560 "vchiq: connect: could not lock mutex for "
561 "state %d: %d",
562 instance->state->id, rc);
563 ret = -EINTR;
564 break;
565 }
566 status = vchiq_connect_internal(instance->state, instance);
567 mutex_unlock(&instance->state->mutex);
568
569 if (status == VCHIQ_SUCCESS)
570 instance->connected = 1;
571 else
572 vchiq_log_error(vchiq_arm_log_level,
573 "vchiq: could not connect: %d", status);
574 break;
575
576 case VCHIQ_IOC_CREATE_SERVICE: {
577 VCHIQ_CREATE_SERVICE_T args;
578 USER_SERVICE_T *user_service = NULL;
579 void *userdata;
580 int srvstate;
581
582 if (copy_from_user
583 (&args, (const void __user *)arg,
584 sizeof(args)) != 0) {
585 ret = -EFAULT;
586 break;
587 }
588
589 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
590 if (!user_service) {
591 ret = -ENOMEM;
592 break;
593 }
594
595 if (args.is_open) {
596 if (!instance->connected) {
597 ret = -ENOTCONN;
598 kfree(user_service);
599 break;
600 }
601 srvstate = VCHIQ_SRVSTATE_OPENING;
602 } else {
603 srvstate =
604 instance->connected ?
605 VCHIQ_SRVSTATE_LISTENING :
606 VCHIQ_SRVSTATE_HIDDEN;
607 }
608
609 userdata = args.params.userdata;
610 args.params.callback = service_callback;
611 args.params.userdata = user_service;
612 service = vchiq_add_service_internal(
613 instance->state,
614 &args.params, srvstate,
615 instance, user_service_free);
616
617 if (service != NULL) {
618 user_service->service = service;
619 user_service->userdata = userdata;
620 user_service->instance = instance;
621 user_service->is_vchi = (args.is_vchi != 0);
622 user_service->dequeue_pending = 0;
623 user_service->close_pending = 0;
624 user_service->message_available_pos =
625 instance->completion_remove - 1;
626 user_service->msg_insert = 0;
627 user_service->msg_remove = 0;
628 sema_init(&user_service->insert_event, 0);
629 sema_init(&user_service->remove_event, 0);
630 sema_init(&user_service->close_event, 0);
631
632 if (args.is_open) {
633 status = vchiq_open_service_internal
634 (service, instance->pid);
635 if (status != VCHIQ_SUCCESS) {
636 vchiq_remove_service(service->handle);
637 service = NULL;
638 ret = (status == VCHIQ_RETRY) ?
639 -EINTR : -EIO;
640 break;
641 }
642 }
643
644 if (copy_to_user((void __user *)
645 &(((VCHIQ_CREATE_SERVICE_T __user *)
646 arg)->handle),
647 (const void *)&service->handle,
648 sizeof(service->handle)) != 0) {
649 ret = -EFAULT;
650 vchiq_remove_service(service->handle);
651 }
652
653 service = NULL;
654 } else {
655 ret = -EEXIST;
656 kfree(user_service);
657 }
658 } break;
659
660 case VCHIQ_IOC_CLOSE_SERVICE: {
661 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
662
663 service = find_service_for_instance(instance, handle);
664 if (service != NULL) {
665 USER_SERVICE_T *user_service =
666 (USER_SERVICE_T *)service->base.userdata;
667 /* close_pending is false on first entry, and when the
668 wait in vchiq_close_service has been interrupted. */
669 if (!user_service->close_pending) {
670 status = vchiq_close_service(service->handle);
671 if (status != VCHIQ_SUCCESS)
672 break;
673 }
674
675 /* close_pending is true once the underlying service
676 has been closed until the client library calls the
677 CLOSE_DELIVERED ioctl, signalling close_event. */
678 if (user_service->close_pending &&
679 down_interruptible(&user_service->close_event))
680 status = VCHIQ_RETRY;
681 }
682 else
683 ret = -EINVAL;
684 } break;
685
686 case VCHIQ_IOC_REMOVE_SERVICE: {
687 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
688
689 service = find_service_for_instance(instance, handle);
690 if (service != NULL) {
691 USER_SERVICE_T *user_service =
692 (USER_SERVICE_T *)service->base.userdata;
693 /* close_pending is false on first entry, and when the
694 wait in vchiq_close_service has been interrupted. */
695 if (!user_service->close_pending) {
696 status = vchiq_remove_service(service->handle);
697 if (status != VCHIQ_SUCCESS)
698 break;
699 }
700
701 /* close_pending is true once the underlying service
702 has been closed until the client library calls the
703 CLOSE_DELIVERED ioctl, signalling close_event. */
704 if (user_service->close_pending &&
705 down_interruptible(&user_service->close_event))
706 status = VCHIQ_RETRY;
707 }
708 else
709 ret = -EINVAL;
710 } break;
711
712 case VCHIQ_IOC_USE_SERVICE:
713 case VCHIQ_IOC_RELEASE_SERVICE: {
714 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
715
716 service = find_service_for_instance(instance, handle);
717 if (service != NULL) {
718 status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
719 vchiq_use_service_internal(service) :
720 vchiq_release_service_internal(service);
721 if (status != VCHIQ_SUCCESS) {
722 vchiq_log_error(vchiq_susp_log_level,
723 "%s: cmd %s returned error %d for "
724 "service %c%c%c%c:%03d",
725 __func__,
726 (cmd == VCHIQ_IOC_USE_SERVICE) ?
727 "VCHIQ_IOC_USE_SERVICE" :
728 "VCHIQ_IOC_RELEASE_SERVICE",
729 status,
730 VCHIQ_FOURCC_AS_4CHARS(
731 service->base.fourcc),
732 service->client_id);
733 ret = -EINVAL;
734 }
735 } else
736 ret = -EINVAL;
737 } break;
738
739 case VCHIQ_IOC_QUEUE_MESSAGE: {
740 VCHIQ_QUEUE_MESSAGE_T args;
741 if (copy_from_user
742 (&args, (const void __user *)arg,
743 sizeof(args)) != 0) {
744 ret = -EFAULT;
745 break;
746 }
747
748 service = find_service_for_instance(instance, args.handle);
749
750 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
751 /* Copy elements into kernel space */
752 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
753 if (copy_from_user(elements, args.elements,
754 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
755 status = vchiq_ioc_queue_message
756 (args.handle,
757 elements, args.count);
758 else
759 ret = -EFAULT;
760 } else {
761 ret = -EINVAL;
762 }
763 } break;
764
765 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
766 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
767 VCHIQ_QUEUE_BULK_TRANSFER_T args;
768 struct bulk_waiter_node *waiter = NULL;
769 VCHIQ_BULK_DIR_T dir =
770 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
771 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
772
773 if (copy_from_user
774 (&args, (const void __user *)arg,
775 sizeof(args)) != 0) {
776 ret = -EFAULT;
777 break;
778 }
779
780 service = find_service_for_instance(instance, args.handle);
781 if (!service) {
782 ret = -EINVAL;
783 break;
784 }
785
786 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
787 waiter = kzalloc(sizeof(struct bulk_waiter_node),
788 GFP_KERNEL);
789 if (!waiter) {
790 ret = -ENOMEM;
791 break;
792 }
793 args.userdata = &waiter->bulk_waiter;
794 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
795 struct list_head *pos;
796 mutex_lock(&instance->bulk_waiter_list_mutex);
797 list_for_each(pos, &instance->bulk_waiter_list) {
798 if (list_entry(pos, struct bulk_waiter_node,
799 list)->pid == current->pid) {
800 waiter = list_entry(pos,
801 struct bulk_waiter_node,
802 list);
803 list_del(pos);
804 break;
805 }
806
807 }
808 mutex_unlock(&instance->bulk_waiter_list_mutex);
809 if (!waiter) {
810 vchiq_log_error(vchiq_arm_log_level,
811 "no bulk_waiter found for pid %d",
812 current->pid);
813 ret = -ESRCH;
814 break;
815 }
816 vchiq_log_info(vchiq_arm_log_level,
817 "found bulk_waiter %pK for pid %d", waiter,
818 current->pid);
819 args.userdata = &waiter->bulk_waiter;
820 }
821 status = vchiq_bulk_transfer
822 (args.handle,
823 VCHI_MEM_HANDLE_INVALID,
824 args.data, args.size,
825 args.userdata, args.mode,
826 dir);
827 if (!waiter)
828 break;
829 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
830 !waiter->bulk_waiter.bulk) {
831 if (waiter->bulk_waiter.bulk) {
832 /* Cancel the signal when the transfer
833 ** completes. */
834 spin_lock(&bulk_waiter_spinlock);
835 waiter->bulk_waiter.bulk->userdata = NULL;
836 spin_unlock(&bulk_waiter_spinlock);
837 }
838 kfree(waiter);
839 } else {
840 const VCHIQ_BULK_MODE_T mode_waiting =
841 VCHIQ_BULK_MODE_WAITING;
842 waiter->pid = current->pid;
843 mutex_lock(&instance->bulk_waiter_list_mutex);
844 list_add(&waiter->list, &instance->bulk_waiter_list);
845 mutex_unlock(&instance->bulk_waiter_list_mutex);
846 vchiq_log_info(vchiq_arm_log_level,
847 "saved bulk_waiter %pK for pid %d",
848 waiter, current->pid);
849
850 if (copy_to_user((void __user *)
851 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
852 arg)->mode),
853 (const void *)&mode_waiting,
854 sizeof(mode_waiting)) != 0)
855 ret = -EFAULT;
856 }
857 } break;
858
859 case VCHIQ_IOC_AWAIT_COMPLETION: {
860 VCHIQ_AWAIT_COMPLETION_T args;
861
862 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
863 if (!instance->connected) {
864 ret = -ENOTCONN;
865 break;
866 }
867
868 if (copy_from_user(&args, (const void __user *)arg,
869 sizeof(args)) != 0) {
870 ret = -EFAULT;
871 break;
872 }
873
874 mutex_lock(&instance->completion_mutex);
875
876 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
877 while ((instance->completion_remove ==
878 instance->completion_insert)
879 && !instance->closing) {
880 int rc;
881 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
882 mutex_unlock(&instance->completion_mutex);
883 rc = down_interruptible(&instance->insert_event);
884 mutex_lock(&instance->completion_mutex);
885 if (rc != 0) {
886 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
887 vchiq_log_info(vchiq_arm_log_level,
888 "AWAIT_COMPLETION interrupted");
889 ret = -EINTR;
890 break;
891 }
892 }
893 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
894
895 /* A read memory barrier is needed to stop prefetch of a stale
896 ** completion record
897 */
898 rmb();
899
900 if (ret == 0) {
901 int msgbufcount = args.msgbufcount;
902 for (ret = 0; ret < args.count; ret++) {
903 VCHIQ_COMPLETION_DATA_T *completion;
904 VCHIQ_SERVICE_T *service;
905 USER_SERVICE_T *user_service;
906 VCHIQ_HEADER_T *header;
907 if (instance->completion_remove ==
908 instance->completion_insert)
909 break;
910 completion = &instance->completions[
911 instance->completion_remove &
912 (MAX_COMPLETIONS - 1)];
913
914 service = completion->service_userdata;
915 user_service = service->base.userdata;
916 completion->service_userdata =
917 user_service->userdata;
918
919 header = completion->header;
920 if (header) {
921 void __user *msgbuf;
922 int msglen;
923
924 msglen = header->size +
925 sizeof(VCHIQ_HEADER_T);
926 /* This must be a VCHIQ-style service */
927 if (args.msgbufsize < msglen) {
928 vchiq_log_error(
929 vchiq_arm_log_level,
930 "header %pK: msgbufsize %x < msglen %x",
931 header, args.msgbufsize,
932 msglen);
933 WARN(1, "invalid message "
934 "size\n");
935 if (ret == 0)
936 ret = -EMSGSIZE;
937 break;
938 }
939 if (msgbufcount <= 0)
940 /* Stall here for lack of a
941 ** buffer for the message. */
942 break;
943 /* Get the pointer from user space */
944 msgbufcount--;
945 if (copy_from_user(&msgbuf,
946 (const void __user *)
947 &args.msgbufs[msgbufcount],
948 sizeof(msgbuf)) != 0) {
949 if (ret == 0)
950 ret = -EFAULT;
951 break;
952 }
953
954 /* Copy the message to user space */
955 if (copy_to_user(msgbuf, header,
956 msglen) != 0) {
957 if (ret == 0)
958 ret = -EFAULT;
959 break;
960 }
961
962 /* Now it has been copied, the message
963 ** can be released. */
964 vchiq_release_message(service->handle,
965 header);
966
967 /* The completion must point to the
968 ** msgbuf. */
969 completion->header = msgbuf;
970 }
971
972 if ((completion->reason ==
973 VCHIQ_SERVICE_CLOSED) &&
974 !instance->use_close_delivered)
975 unlock_service(service);
976
977 if (copy_to_user((void __user *)(
978 (size_t)args.buf +
979 ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
980 completion,
981 sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
982 if (ret == 0)
983 ret = -EFAULT;
984 break;
985 }
986
987 instance->completion_remove++;
988 }
989
990 if (msgbufcount != args.msgbufcount) {
991 if (copy_to_user((void __user *)
992 &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
993 msgbufcount,
994 &msgbufcount,
995 sizeof(msgbufcount)) != 0) {
996 ret = -EFAULT;
997 }
998 }
999 }
1000
1001 if (ret != 0)
1002 up(&instance->remove_event);
1003 mutex_unlock(&instance->completion_mutex);
1004 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1005 } break;
1006
1007 case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1008 VCHIQ_DEQUEUE_MESSAGE_T args;
1009 USER_SERVICE_T *user_service;
1010 VCHIQ_HEADER_T *header;
1011
1012 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1013 if (copy_from_user
1014 (&args, (const void __user *)arg,
1015 sizeof(args)) != 0) {
1016 ret = -EFAULT;
1017 break;
1018 }
1019 service = find_service_for_instance(instance, args.handle);
1020 if (!service) {
1021 ret = -EINVAL;
1022 break;
1023 }
1024 user_service = (USER_SERVICE_T *)service->base.userdata;
1025 if (user_service->is_vchi == 0) {
1026 ret = -EINVAL;
1027 break;
1028 }
1029
1030 spin_lock(&msg_queue_spinlock);
1031 if (user_service->msg_remove == user_service->msg_insert) {
1032 if (!args.blocking) {
1033 spin_unlock(&msg_queue_spinlock);
1034 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1035 ret = -EWOULDBLOCK;
1036 break;
1037 }
1038 user_service->dequeue_pending = 1;
1039 do {
1040 spin_unlock(&msg_queue_spinlock);
1041 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1042 if (down_interruptible(
1043 &user_service->insert_event) != 0) {
1044 vchiq_log_info(vchiq_arm_log_level,
1045 "DEQUEUE_MESSAGE interrupted");
1046 ret = -EINTR;
1047 break;
1048 }
1049 spin_lock(&msg_queue_spinlock);
1050 } while (user_service->msg_remove ==
1051 user_service->msg_insert);
1052
1053 if (ret)
1054 break;
1055 }
1056
1057 BUG_ON((int)(user_service->msg_insert -
1058 user_service->msg_remove) < 0);
1059
1060 header = user_service->msg_queue[user_service->msg_remove &
1061 (MSG_QUEUE_SIZE - 1)];
1062 user_service->msg_remove++;
1063 spin_unlock(&msg_queue_spinlock);
1064
1065 up(&user_service->remove_event);
1066 if (header == NULL)
1067 ret = -ENOTCONN;
1068 else if (header->size <= args.bufsize) {
1069 /* Copy to user space if msgbuf is not NULL */
1070 if ((args.buf == NULL) ||
1071 (copy_to_user((void __user *)args.buf,
1072 header->data,
1073 header->size) == 0)) {
1074 ret = header->size;
1075 vchiq_release_message(
1076 service->handle,
1077 header);
1078 } else
1079 ret = -EFAULT;
1080 } else {
1081 vchiq_log_error(vchiq_arm_log_level,
1082 "header %pK: bufsize %x < size %x",
1083 header, args.bufsize, header->size);
1084 WARN(1, "invalid size\n");
1085 ret = -EMSGSIZE;
1086 }
1087 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1088 } break;
1089
1090 case VCHIQ_IOC_GET_CLIENT_ID: {
1091 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1092
1093 ret = vchiq_get_client_id(handle);
1094 } break;
1095
1096 case VCHIQ_IOC_GET_CONFIG: {
1097 VCHIQ_GET_CONFIG_T args;
1098 VCHIQ_CONFIG_T config;
1099
1100 if (copy_from_user(&args, (const void __user *)arg,
1101 sizeof(args)) != 0) {
1102 ret = -EFAULT;
1103 break;
1104 }
1105 if (args.config_size > sizeof(config)) {
1106 ret = -EINVAL;
1107 break;
1108 }
1109 status = vchiq_get_config(instance, args.config_size, &config);
1110 if (status == VCHIQ_SUCCESS) {
1111 if (copy_to_user((void __user *)args.pconfig,
1112 &config, args.config_size) != 0) {
1113 ret = -EFAULT;
1114 break;
1115 }
1116 }
1117 } break;
1118
1119 case VCHIQ_IOC_SET_SERVICE_OPTION: {
1120 VCHIQ_SET_SERVICE_OPTION_T args;
1121
1122 if (copy_from_user(
1123 &args, (const void __user *)arg,
1124 sizeof(args)) != 0) {
1125 ret = -EFAULT;
1126 break;
1127 }
1128
1129 service = find_service_for_instance(instance, args.handle);
1130 if (!service) {
1131 ret = -EINVAL;
1132 break;
1133 }
1134
1135 status = vchiq_set_service_option(
1136 args.handle, args.option, args.value);
1137 } break;
1138
1139 case VCHIQ_IOC_DUMP_PHYS_MEM: {
1140 VCHIQ_DUMP_MEM_T args;
1141
1142 if (copy_from_user
1143 (&args, (const void __user *)arg,
1144 sizeof(args)) != 0) {
1145 ret = -EFAULT;
1146 break;
1147 }
1148 dump_phys_mem(args.virt_addr, args.num_bytes);
1149 } break;
1150
1151 case VCHIQ_IOC_LIB_VERSION: {
1152 unsigned int lib_version = (unsigned int)arg;
1153
1154 if (lib_version < VCHIQ_VERSION_MIN)
1155 ret = -EINVAL;
1156 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1157 instance->use_close_delivered = 1;
1158 } break;
1159
1160 case VCHIQ_IOC_CLOSE_DELIVERED: {
1161 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1162
1163 service = find_closed_service_for_instance(instance, handle);
1164 if (service != NULL) {
1165 USER_SERVICE_T *user_service =
1166 (USER_SERVICE_T *)service->base.userdata;
1167 close_delivered(user_service);
1168 }
1169 else
1170 ret = -EINVAL;
1171 } break;
1172
1173 default:
1174 ret = -ENOTTY;
1175 break;
1176 }
1177
1178 if (service)
1179 unlock_service(service);
1180
1181 if (ret == 0) {
1182 if (status == VCHIQ_ERROR)
1183 ret = -EIO;
1184 else if (status == VCHIQ_RETRY)
1185 ret = -EINTR;
1186 }
1187
1188 if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1189 (ret != -EWOULDBLOCK))
1190 vchiq_log_info(vchiq_arm_log_level,
1191 " ioctl instance %lx, cmd %s -> status %d, %ld",
1192 (unsigned long)instance,
1193 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1194 ioctl_names[_IOC_NR(cmd)] :
1195 "<invalid>",
1196 status, ret);
1197 else
1198 vchiq_log_trace(vchiq_arm_log_level,
1199 " ioctl instance %lx, cmd %s -> status %d, %ld",
1200 (unsigned long)instance,
1201 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1202 ioctl_names[_IOC_NR(cmd)] :
1203 "<invalid>",
1204 status, ret);
1205
1206 return ret;
1207 }
1208
1209 /****************************************************************************
1210 *
1211 * vchiq_open
1212 *
1213 ***************************************************************************/
1214
1215 static int
1216 vchiq_open(struct inode *inode, struct file *file)
1217 {
1218 int dev = iminor(inode) & 0x0f;
1219 vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1220 switch (dev) {
1221 case VCHIQ_MINOR: {
1222 int ret;
1223 VCHIQ_STATE_T *state = vchiq_get_state();
1224 VCHIQ_INSTANCE_T instance;
1225
1226 if (!state) {
1227 vchiq_log_error(vchiq_arm_log_level,
1228 "vchiq has no connection to VideoCore");
1229 return -ENOTCONN;
1230 }
1231
1232 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1233 if (!instance)
1234 return -ENOMEM;
1235
1236 instance->state = state;
1237 instance->pid = current->tgid;
1238
1239 ret = vchiq_debugfs_add_instance(instance);
1240 if (ret != 0) {
1241 kfree(instance);
1242 return ret;
1243 }
1244
1245 sema_init(&instance->insert_event, 0);
1246 sema_init(&instance->remove_event, 0);
1247 mutex_init(&instance->completion_mutex);
1248 mutex_init(&instance->bulk_waiter_list_mutex);
1249 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1250
1251 file->private_data = instance;
1252 } break;
1253
1254 default:
1255 vchiq_log_error(vchiq_arm_log_level,
1256 "Unknown minor device: %d", dev);
1257 return -ENXIO;
1258 }
1259
1260 return 0;
1261 }
1262
1263 /****************************************************************************
1264 *
1265 * vchiq_release
1266 *
1267 ***************************************************************************/
1268
/* Tear down an open vchiq file handle: stop the completion thread,
 * terminate every service belonging to this instance, drain queued
 * messages and completions, then free the instance. The ordering below
 * (mark-for-termination first, then wait for VCHIQ_SRVSTATE_FREE) is
 * load-bearing - do not reorder. */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;
	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		/* No connection to VideoCore - nothing can be torn down
		 * cleanly. NOTE(review): the instance is leaked on this
		 * path; presumably acceptable because the firmware never
		 * came up - confirm. */
		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
			USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) != NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Blocks until the service reaches its final state. */
			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Return any messages still queued for this service
			 * to VideoCore. The spinlock is dropped around
			 * vchiq_release_message because it may block. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
					(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
			{
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free any bulk-transfer waiters left behind by exited
		 * threads (see the bulk transfer ioctl paths). */
		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}
1406
1407 /****************************************************************************
1408 *
1409 * vchiq_dump
1410 *
1411 ***************************************************************************/
1412
1413 void
1414 vchiq_dump(void *dump_context, const char *str, int len)
1415 {
1416 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1417
1418 if (context->actual < context->space) {
1419 int copy_bytes;
1420 if (context->offset > 0) {
1421 int skip_bytes = min(len, (int)context->offset);
1422 str += skip_bytes;
1423 len -= skip_bytes;
1424 context->offset -= skip_bytes;
1425 if (context->offset > 0)
1426 return;
1427 }
1428 copy_bytes = min(len, (int)(context->space - context->actual));
1429 if (copy_bytes == 0)
1430 return;
1431 if (copy_to_user(context->buf + context->actual, str,
1432 copy_bytes))
1433 context->actual = -EFAULT;
1434 context->actual += copy_bytes;
1435 len -= copy_bytes;
1436
1437 /* If tne terminating NUL is included in the length, then it
1438 ** marks the end of a line and should be replaced with a
1439 ** carriage return. */
1440 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1441 char cr = '\n';
1442 if (copy_to_user(context->buf + context->actual - 1,
1443 &cr, 1))
1444 context->actual = -EFAULT;
1445 }
1446 }
1447 }
1448
1449 /****************************************************************************
1450 *
1451 * vchiq_dump_platform_instance_state
1452 *
1453 ***************************************************************************/
1454
1455 void
1456 vchiq_dump_platform_instances(void *dump_context)
1457 {
1458 VCHIQ_STATE_T *state = vchiq_get_state();
1459 char buf[80];
1460 int len;
1461 int i;
1462
1463 /* There is no list of instances, so instead scan all services,
1464 marking those that have been dumped. */
1465
1466 for (i = 0; i < state->unused_service; i++) {
1467 VCHIQ_SERVICE_T *service = state->services[i];
1468 VCHIQ_INSTANCE_T instance;
1469
1470 if (service && (service->base.callback == service_callback)) {
1471 instance = service->instance;
1472 if (instance)
1473 instance->mark = 0;
1474 }
1475 }
1476
1477 for (i = 0; i < state->unused_service; i++) {
1478 VCHIQ_SERVICE_T *service = state->services[i];
1479 VCHIQ_INSTANCE_T instance;
1480
1481 if (service && (service->base.callback == service_callback)) {
1482 instance = service->instance;
1483 if (instance && !instance->mark) {
1484 len = snprintf(buf, sizeof(buf),
1485 "Instance %pK: pid %d,%s completions %d/%d",
1486 instance, instance->pid,
1487 instance->connected ? " connected, " :
1488 "",
1489 instance->completion_insert -
1490 instance->completion_remove,
1491 MAX_COMPLETIONS);
1492
1493 vchiq_dump(dump_context, buf, len + 1);
1494
1495 instance->mark = 1;
1496 }
1497 }
1498 }
1499 }
1500
1501 /****************************************************************************
1502 *
1503 * vchiq_dump_platform_service_state
1504 *
1505 ***************************************************************************/
1506
1507 void
1508 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1509 {
1510 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1511 char buf[80];
1512 int len;
1513
1514 len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
1515
1516 if ((service->base.callback == service_callback) &&
1517 user_service->is_vchi) {
1518 len += snprintf(buf + len, sizeof(buf) - len,
1519 ", %d/%d messages",
1520 user_service->msg_insert - user_service->msg_remove,
1521 MSG_QUEUE_SIZE);
1522
1523 if (user_service->dequeue_pending)
1524 len += snprintf(buf + len, sizeof(buf) - len,
1525 " (dequeue pending)");
1526 }
1527
1528 vchiq_dump(dump_context, buf, len + 1);
1529 }
1530
1531 /****************************************************************************
1532 *
 * dump_phys_mem
1534 *
1535 ***************************************************************************/
1536
1537 static void
1538 dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1539 {
1540 int rc;
1541 uint8_t *end_virt_addr = virt_addr + num_bytes;
1542 int num_pages;
1543 int offset;
1544 int end_offset;
1545 int page_idx;
1546 int prev_idx;
1547 struct page *page;
1548 struct page **pages;
1549 uint8_t *kmapped_virt_ptr;
1550
1551 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1552
1553 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1554 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1555 ~0x0fuL);
1556
1557 offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1558 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1559
1560 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1561
1562 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1563 if (pages == NULL) {
1564 vchiq_log_error(vchiq_arm_log_level,
1565 "Unable to allocation memory for %d pages\n",
1566 num_pages);
1567 return;
1568 }
1569
1570 down_read(&current->mm->mmap_sem);
1571 rc = get_user_pages(
1572 (unsigned long)virt_addr, /* start */
1573 num_pages, /* len */
1574 0, /* gup_flags */
1575 pages, /* pages (array of page pointers) */
1576 NULL); /* vmas */
1577 up_read(&current->mm->mmap_sem);
1578
1579 prev_idx = -1;
1580 page = NULL;
1581
1582 if (rc < 0) {
1583 vchiq_log_error(vchiq_arm_log_level,
1584 "Failed to get user pages: %d\n", rc);
1585 goto out;
1586 }
1587
1588 while (offset < end_offset) {
1589
1590 int page_offset = offset % PAGE_SIZE;
1591 page_idx = offset / PAGE_SIZE;
1592
1593 if (page_idx != prev_idx) {
1594
1595 if (page != NULL)
1596 kunmap(page);
1597 page = pages[page_idx];
1598 kmapped_virt_ptr = kmap(page);
1599
1600 prev_idx = page_idx;
1601 }
1602
1603 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1604 vchiq_log_dump_mem("ph",
1605 (uint32_t)(unsigned long)&kmapped_virt_ptr[
1606 page_offset],
1607 &kmapped_virt_ptr[page_offset], 16);
1608
1609 offset += 16;
1610 }
1611
1612 out:
1613 if (page != NULL)
1614 kunmap(page);
1615
1616 for (page_idx = 0; page_idx < num_pages; page_idx++)
1617 put_page(pages[page_idx]);
1618
1619 kfree(pages);
1620 }
1621
1622 /****************************************************************************
1623 *
1624 * vchiq_read
1625 *
1626 ***************************************************************************/
1627
1628 static ssize_t
1629 vchiq_read(struct file *file, char __user *buf,
1630 size_t count, loff_t *ppos)
1631 {
1632 DUMP_CONTEXT_T context;
1633 context.buf = buf;
1634 context.actual = 0;
1635 context.space = count;
1636 context.offset = *ppos;
1637
1638 vchiq_dump_state(&context, &g_state);
1639
1640 *ppos += context.actual;
1641
1642 return context.actual;
1643 }
1644
1645 VCHIQ_STATE_T *
1646 vchiq_get_state(void)
1647 {
1648
1649 if (g_state.remote == NULL)
1650 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1651 else if (g_state.remote->initialised != 1)
1652 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1653 __func__, g_state.remote->initialised);
1654
1655 return ((g_state.remote != NULL) &&
1656 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1657 }
1658
/* File operations for the vchiq character device. All of the userspace
 * API is carried over vchiq_ioctl; vchiq_read only serves debug dumps.
 * NOTE(review): no .compat_ioctl - 32-bit userland on a 64-bit kernel
 * presumably unsupported; confirm. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
1667
1668 /*
1669 * Autosuspend related functionality
1670 */
1671
1672 int
1673 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1674 {
1675 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1676 if (!arm_state)
1677 /* autosuspend not supported - always return wanted */
1678 return 1;
1679 else if (arm_state->blocked_count)
1680 return 1;
1681 else if (!arm_state->videocore_use_count)
1682 /* usage count zero - check for override unless we're forcing */
1683 if (arm_state->resume_blocked)
1684 return 0;
1685 else
1686 return vchiq_platform_videocore_wanted(state);
1687 else
1688 /* non-zero usage count - videocore still required */
1689 return 1;
1690 }
1691
/* Service callback for the keep-alive ("KEEP") service. The keep-alive
 * thread never expects events on this service, so any invocation is
 * logged as an error. Always returns 0 (VCHIQ_SUCCESS). */
static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}
1702
/* Keep-alive kernel thread. Opens a private vchiq connection with a
 * "KEEP" service, then loops forever converting the use/release counts
 * accumulated in the arm state (ka_use_count / ka_release_count) into
 * vchiq_use_service / vchiq_release_service calls. This keeps the
 * firmware's idea of the use count in step with the ARM side across
 * suspend/resume. */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback = vchiq_keepalive_vchiq_callback,
		.version = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	/* NOTE(review): this loop never exits, so the shutdown label is
	 * only reachable from the error paths above. */
	while (1) {
		long rc = 0, uc = 0;
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
			!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1782
1783
1784
/* Initialise the ARM-side suspend/resume state attached to a vchiq
 * state: locks, keep-alive counters, the suspend/resume completions
 * (with their documented initial states) and the autosuspend timer.
 * Always returns VCHIQ_SUCCESS; a NULL arm_state is silently tolerated. */
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		init_completion(&arm_state->vc_suspend_complete);

		init_completion(&arm_state->vc_resume_complete);
		/* Initialise to 'done' state. We only want to block on resume
		 * completion while videocore is suspended. */
		set_resume_state(arm_state, VC_RESUME_RESUMED);

		init_completion(&arm_state->resume_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while resume is blocked */
		complete_all(&arm_state->resume_blocker);

		init_completion(&arm_state->blocked_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while things are waiting on the resume blocker */
		complete_all(&arm_state->blocked_blocker);

		/* The timer is armed by start_suspend_timer; the callback
		 * receives the vchiq state, not the arm state. */
		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
		arm_state->suspend_timer_running = 0;
		setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
			(unsigned long)(state));

		arm_state->first_connect = 0;

	}
	return VCHIQ_SUCCESS;
}
1823
1824 /*
1825 ** Functions to modify the state variables;
1826 ** set_suspend_state
1827 ** set_resume_state
1828 **
1829 ** There are more state variables than we might like, so ensure they remain in
1830 ** step. Suspend and resume state are maintained separately, since most of
1831 ** these state machines can operate independently. However, there are a few
1832 ** states where state transitions in one state machine cause a reset to the
1833 ** other state machine. In addition, there are some completion events which
1834 ** need to occur on state machine reset and end-state(s), so these are also
1835 ** dealt with in these functions.
1836 **
1837 ** In all states we set the state variable according to the input, but in some
1838 ** cases we perform additional steps outlined below;
1839 **
1840 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1841 ** The suspend completion is completed after any suspend
1842 ** attempt. When we reset the state machine we also reset
1843 ** the completion. This reset occurs when videocore is
1844 ** resumed, and also if we initiate suspend after a suspend
1845 ** failure.
1846 **
1847 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1848 ** suspend - ie from this point on we must try to suspend
1849 ** before resuming can occur. We therefore also reset the
1850 ** resume state machine to VC_RESUME_IDLE in this state.
1851 **
1852 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1853 ** complete_all on the suspend completion to notify
1854 ** anything waiting for suspend to happen.
1855 **
1856 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1857 ** initiate resume, so no need to alter resume state.
1858 ** We call complete_all on the suspend completion to notify
1859 ** of suspend rejection.
1860 **
1861 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1862 ** suspend completion and reset the resume state machine.
1863 **
1864 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
**                  resume completion is in its 'done' state whenever
**                  videocore is running. Therefore, the VC_RESUME_IDLE
1867 ** state implies that videocore is suspended.
1868 ** Hence, any thread which needs to wait until videocore is
1869 ** running can wait on this completion - it will only block
1870 ** if videocore is suspended.
1871 **
1872 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
1873 ** Call complete_all on the resume completion to unblock
1874 ** any threads waiting for resume. Also reset the suspend
**                     state machine to its idle state.
1876 **
1877 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1878 */
1879
1880 void
1881 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1882 enum vc_suspend_status new_state)
1883 {
1884 /* set the state in all cases */
1885 arm_state->vc_suspend_state = new_state;
1886
1887 /* state specific additional actions */
1888 switch (new_state) {
1889 case VC_SUSPEND_FORCE_CANCELED:
1890 complete_all(&arm_state->vc_suspend_complete);
1891 break;
1892 case VC_SUSPEND_REJECTED:
1893 complete_all(&arm_state->vc_suspend_complete);
1894 break;
1895 case VC_SUSPEND_FAILED:
1896 complete_all(&arm_state->vc_suspend_complete);
1897 arm_state->vc_resume_state = VC_RESUME_RESUMED;
1898 complete_all(&arm_state->vc_resume_complete);
1899 break;
1900 case VC_SUSPEND_IDLE:
1901 reinit_completion(&arm_state->vc_suspend_complete);
1902 break;
1903 case VC_SUSPEND_REQUESTED:
1904 break;
1905 case VC_SUSPEND_IN_PROGRESS:
1906 set_resume_state(arm_state, VC_RESUME_IDLE);
1907 break;
1908 case VC_SUSPEND_SUSPENDED:
1909 complete_all(&arm_state->vc_suspend_complete);
1910 break;
1911 default:
1912 BUG();
1913 break;
1914 }
1915 }
1916
1917 void
1918 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1919 enum vc_resume_status new_state)
1920 {
1921 /* set the state in all cases */
1922 arm_state->vc_resume_state = new_state;
1923
1924 /* state specific additional actions */
1925 switch (new_state) {
1926 case VC_RESUME_FAILED:
1927 break;
1928 case VC_RESUME_IDLE:
1929 reinit_completion(&arm_state->vc_resume_complete);
1930 break;
1931 case VC_RESUME_REQUESTED:
1932 break;
1933 case VC_RESUME_IN_PROGRESS:
1934 break;
1935 case VC_RESUME_RESUMED:
1936 complete_all(&arm_state->vc_resume_complete);
1937 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1938 break;
1939 default:
1940 BUG();
1941 break;
1942 }
1943 }
1944
1945
1946 /* should be called with the write lock held */
1947 inline void
1948 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1949 {
1950 del_timer(&arm_state->suspend_timer);
1951 arm_state->suspend_timer.expires = jiffies +
1952 msecs_to_jiffies(arm_state->
1953 suspend_timer_timeout);
1954 add_timer(&arm_state->suspend_timer);
1955 arm_state->suspend_timer_running = 1;
1956 }
1957
1958 /* should be called with the write lock held */
1959 static inline void
1960 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1961 {
1962 if (arm_state->suspend_timer_running) {
1963 del_timer(&arm_state->suspend_timer);
1964 arm_state->suspend_timer_running = 0;
1965 }
1966 }
1967
1968 static inline int
1969 need_resume(VCHIQ_STATE_T *state)
1970 {
1971 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1972 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1973 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1974 vchiq_videocore_wanted(state);
1975 }
1976
/* Prevent videocore from being resumed, first letting any clients
 * blocked by a previous force-suspend finish and waiting out any
 * in-flight resume. Called with susp_res_lock held for write; the lock
 * is dropped and retaken around each wait. Returns VCHIQ_SUCCESS or
 * VCHIQ_ERROR (note the return is declared int, not VCHIQ_STATUS_T). */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
		msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		reinit_completion(&arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed" , __func__);
			status = VCHIQ_ERROR;
			/* Retake the lock before returning - callers expect
			 * it held on all paths. */
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		/* Give up after two resume waits - the state may be
		 * flapping. */
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume" , __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}
	/* Arm the blocker and mark resume as blocked; undone by
	 * unblock_resume(). */
	reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
2042
/* Undo block_resume(): release any threads waiting on the resume
 * blocker, then clear the flag. Should be called with the write lock
 * held (it guards resume_blocked). */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
2049
2050 /* Initiate suspend via slot handler. Should be called with the write lock
2051 * held */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	/* No arm state - autosuspend unsupported; report VCHIQ_ERROR. */
	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;

	/* The case order below is deliberate: unexpected states fall
	 * through to the REJECTED/FAILED reset, which falls through to
	 * IDLE where the suspend request is actually issued. */
	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2101
2102 void
2103 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2104 {
2105 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2106 int susp = 0;
2107
2108 if (!arm_state)
2109 goto out;
2110
2111 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2112
2113 write_lock_bh(&arm_state->susp_res_lock);
2114 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2115 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2116 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2117 susp = 1;
2118 }
2119 write_unlock_bh(&arm_state->susp_res_lock);
2120
2121 if (susp)
2122 vchiq_platform_suspend(state);
2123
2124 out:
2125 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2126 return;
2127 }
2128
2129
2130 static void
2131 output_timeout_error(VCHIQ_STATE_T *state)
2132 {
2133 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2134 char err[50] = "";
2135 int vc_use_count = arm_state->videocore_use_count;
2136 int active_services = state->unused_service;
2137 int i;
2138
2139 if (!arm_state->videocore_use_count) {
2140 snprintf(err, sizeof(err), " Videocore usecount is 0");
2141 goto output_msg;
2142 }
2143 for (i = 0; i < active_services; i++) {
2144 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2145 if (service_ptr && service_ptr->service_use_count &&
2146 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2147 snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2148 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2149 service_ptr->base.fourcc),
2150 service_ptr->client_id,
2151 service_ptr->service_use_count,
2152 service_ptr->service_use_count ==
2153 vc_use_count ? "" : " (+ more)");
2154 break;
2155 }
2156 }
2157
2158 output_msg:
2159 vchiq_log_error(vchiq_susp_log_level,
2160 "timed out waiting for vc suspend (%d).%s",
2161 arm_state->autosuspend_override, err);
2162
2163 }
2164
2165 /* Try to get videocore into suspended state, regardless of autosuspend state.
2166 ** We don't actually force suspend, since videocore may get into a bad state
2167 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2168 ** determine a good point to suspend. If this doesn't happen within 100ms we
2169 ** report failure.
2170 **
2171 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2172 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2173 */
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	/* repeat < 0 means "one extra timeout period allowed if suspend is
	 * already in progress"; consumed at most once in the loop below. */
	int repeat = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Block any concurrent resume attempt for the duration; every error
	 * path below must undo this via the unblock_resume label. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			/* Videocore still wanted: rely on autosuspend to pick
			 * a safe point, unless it has failed too many times. */
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* Drop the lock while sleeping so the suspend state machine
		 * can make progress; retake it before inspecting state. */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			/* Interrupted by a signal - caller sees VCHIQ_ERROR. */
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			/* Timed out: record the failure so later calls can
			 * escalate, and log which services are keeping the
			 * videocore busy. */
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2284
/* Initiate a videocore suspend if nothing is currently keeping it awake.
 * Called e.g. from the suspend timer path; takes the suspend/resume write
 * lock and only acts once the first connection has been established. */
void
vchiq_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	/* Suspend only when not already suspended, a connection exists, and
	 * no service (or peer) holds a use count. */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
			arm_state->first_connect &&
			!vchiq_videocore_wanted(state)) {
		vchiq_arm_vcsuspend(state);
	}
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}
2307
2308
/* Undo the resume blocker taken by vchiq_arm_force_suspend and, if a
 * resume is needed, wait for it to complete.
 *
 * Returns 0 if the videocore resumed, -1 if it remains suspended or the
 * wait for resume was interrupted. */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	/* vchiq_check_resume must be called with the write lock held. */
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	/* Read lock suffices here - we only inspect the final state. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2351
2352 /* This function should be called with the write lock held */
2353 int
2354 vchiq_check_resume(VCHIQ_STATE_T *state)
2355 {
2356 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2357 int resume = 0;
2358
2359 if (!arm_state)
2360 goto out;
2361
2362 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2363
2364 if (need_resume(state)) {
2365 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2366 request_poll(state, NULL, 0);
2367 resume = 1;
2368 }
2369
2370 out:
2371 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2372 return resume;
2373 }
2374
/* Act on a pending resume request: if the videocore is asleep (wake
 * address recorded) and a resume was requested but not yet started, mark
 * it in progress and perform the platform resume outside the lock. */
void
vchiq_platform_check_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int res = 0;	/* set when we should call vchiq_platform_resume */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	/* wake_address == 0 means the videocore never suspended. */
	if (arm_state->wake_address == 0) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: already awake", __func__);
		goto unlock;
	}
	if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: already resuming", __func__);
		goto unlock;
	}

	if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
		set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
		res = 1;
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s: not resuming (resume state %s)", __func__,
			resume_state_names[arm_state->vc_resume_state +
						VC_RESUME_NUM_OFFSET]);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

	/* Platform resume may sleep, so it runs after dropping the lock. */
	if (res)
		vchiq_platform_resume(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;

}
2418
2419
2420
/* Take a videocore use count on behalf of |service| (or of the VCHIQ peer
 * when use_type == USE_TYPE_VCHIQ), resuming the videocore if necessary.
 * Blocks while a forced suspend holds the resume blocker, and waits for
 * resume to complete before returning.
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR if service is NULL (for a service
 * use_type) or a wait was interrupted by a fatal signal. */
VCHIQ_STATUS_T
vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
		enum USE_TYPE_E use_type)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* name used only in log messages */
	int *entity_uc;		/* use count belonging to |entity| */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service "
				"ptr", __func__);
		ret = VCHIQ_ERROR;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker. These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker) != 0) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* Retake the lock only to undo the
				 * blocked_count increment from above. */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* Last blocked thread out signals the blocked_blocker
			 * so a second force suspend may proceed. */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* A new user has arrived, so any pending idle-suspend is moot. */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		/* Ask the slot handler thread to carry out the resume. */
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);


	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete) != 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
		/* Drain any "use" acks accumulated by the keepalive thread;
		 * on a send failure the remainder is returned to the counter
		 * so a later call can retry. */
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2553
/* Drop one videocore use count on behalf of |service| (or of the peer
 * when service is NULL).  When the last user goes away, either start the
 * suspend timer or suspend immediately (if a force suspend is pending or
 * the platform does not use the timer).
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR on use-count underflow (which is
 * reported with WARN_ON rather than BUG_ON so a misbehaving user thread
 * cannot crash the kernel). */
VCHIQ_STATUS_T
vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* name used only in log messages */
	int *entity_uc;		/* use count belonging to |entity| */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		sprintf(entity, "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = VCHIQ_ERROR;
		goto unlock;
	}
	local_uc = --arm_state->videocore_use_count;
	local_entity_uc = --(*entity_uc);

	if (!vchiq_videocore_wanted(state)) {
		if (vchiq_platform_use_suspend_timer() &&
				!arm_state->resume_blocked) {
			/* Only use the timer if we're not trying to force
			 * suspend (=> resume_blocked) */
			start_suspend_timer(arm_state);
		} else {
			vchiq_log_info(vchiq_susp_log_level,
				"%s %s count %d, state count %d - suspending",
				__func__, entity, *entity_uc,
				arm_state->videocore_use_count);
			vchiq_arm_vcsuspend(state);
		}
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2615
/* Remote (videocore) side has taken a use count: record it and wake the
 * keepalive thread, which performs the local vchiq_use_internal call. */
void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
2624
/* Remote (videocore) side has dropped a use count: record it and wake the
 * keepalive thread, which performs the local vchiq_release_internal call. */
void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2633
/* Convenience wrapper: take a use count for |service| with resume. */
VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2639
/* Convenience wrapper: drop a use count for |service|. */
VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_release_internal(service->state, service);
}
2645
/* Accessor for the instance's embedded debugfs node. */
VCHIQ_DEBUGFS_NODE_T *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
	return &instance->debugfs_node;
}
2651
2652 int
2653 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2654 {
2655 VCHIQ_SERVICE_T *service;
2656 int use_count = 0, i;
2657 i = 0;
2658 while ((service = next_service_by_instance(instance->state,
2659 instance, &i)) != NULL) {
2660 use_count += service->service_use_count;
2661 unlock_service(service);
2662 }
2663 return use_count;
2664 }
2665
/* Accessor for the pid of the process that opened this instance. */
int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
	return instance->pid;
}
2671
/* Accessor for the instance-wide trace flag. */
int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
	return instance->trace;
}
2677
2678 void
2679 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2680 {
2681 VCHIQ_SERVICE_T *service;
2682 int i;
2683 i = 0;
2684 while ((service = next_service_by_instance(instance->state,
2685 instance, &i)) != NULL) {
2686 service->trace = trace;
2687 unlock_service(service);
2688 }
2689 instance->trace = (trace != 0);
2690 }
2691
2692 static void suspend_timer_callback(unsigned long context)
2693 {
2694 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2695 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2696 if (!arm_state)
2697 goto out;
2698 vchiq_log_info(vchiq_susp_log_level,
2699 "%s - suspend timer expired - check suspend", __func__);
2700 vchiq_check_suspend(state);
2701 out:
2702 return;
2703 }
2704
2705 VCHIQ_STATUS_T
2706 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2707 {
2708 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2709 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2710 if (service) {
2711 ret = vchiq_use_internal(service->state, service,
2712 USE_TYPE_SERVICE_NO_RESUME);
2713 unlock_service(service);
2714 }
2715 return ret;
2716 }
2717
2718 VCHIQ_STATUS_T
2719 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2720 {
2721 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2722 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2723 if (service) {
2724 ret = vchiq_use_internal(service->state, service,
2725 USE_TYPE_SERVICE);
2726 unlock_service(service);
2727 }
2728 return ret;
2729 }
2730
2731 VCHIQ_STATUS_T
2732 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2733 {
2734 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2735 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2736 if (service) {
2737 ret = vchiq_release_internal(service->state, service);
2738 unlock_service(service);
2739 }
2740 return ret;
2741 }
2742
2743 void
2744 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2745 {
2746 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2747 int i, j = 0;
2748 /* Only dump 64 services */
2749 static const int local_max_services = 64;
2750 /* If there's more than 64 services, only dump ones with
2751 * non-zero counts */
2752 int only_nonzero = 0;
2753 static const char *nz = "<-- preventing suspend";
2754
2755 enum vc_suspend_status vc_suspend_state;
2756 enum vc_resume_status vc_resume_state;
2757 int peer_count;
2758 int vc_use_count;
2759 int active_services;
2760 struct service_data_struct {
2761 int fourcc;
2762 int clientid;
2763 int use_count;
2764 } service_data[local_max_services];
2765
2766 if (!arm_state)
2767 return;
2768
2769 read_lock_bh(&arm_state->susp_res_lock);
2770 vc_suspend_state = arm_state->vc_suspend_state;
2771 vc_resume_state = arm_state->vc_resume_state;
2772 peer_count = arm_state->peer_use_count;
2773 vc_use_count = arm_state->videocore_use_count;
2774 active_services = state->unused_service;
2775 if (active_services > local_max_services)
2776 only_nonzero = 1;
2777
2778 for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2779 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2780 if (!service_ptr)
2781 continue;
2782
2783 if (only_nonzero && !service_ptr->service_use_count)
2784 continue;
2785
2786 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2787 service_data[j].fourcc = service_ptr->base.fourcc;
2788 service_data[j].clientid = service_ptr->client_id;
2789 service_data[j++].use_count = service_ptr->
2790 service_use_count;
2791 }
2792 }
2793
2794 read_unlock_bh(&arm_state->susp_res_lock);
2795
2796 vchiq_log_warning(vchiq_susp_log_level,
2797 "-- Videcore suspend state: %s --",
2798 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2799 vchiq_log_warning(vchiq_susp_log_level,
2800 "-- Videcore resume state: %s --",
2801 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2802
2803 if (only_nonzero)
2804 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2805 "services (%d). Only dumping up to first %d services "
2806 "with non-zero use-count", active_services,
2807 local_max_services);
2808
2809 for (i = 0; i < j; i++) {
2810 vchiq_log_warning(vchiq_susp_log_level,
2811 "----- %c%c%c%c:%d service count %d %s",
2812 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2813 service_data[i].clientid,
2814 service_data[i].use_count,
2815 service_data[i].use_count ? nz : "");
2816 }
2817 vchiq_log_warning(vchiq_susp_log_level,
2818 "----- VCHIQ use count count %d", peer_count);
2819 vchiq_log_warning(vchiq_susp_log_level,
2820 "--- Overall vchiq instance use count %d", vc_use_count);
2821
2822 vchiq_dump_platform_use_state(state);
2823 }
2824
/* Verify that |service| holds a use count (i.e. the caller has balanced
 * use/release correctly before communicating).  On failure, log the
 * offending service and dump the full use-count state.
 *
 * Returns VCHIQ_SUCCESS if the service has a non-zero use count,
 * VCHIQ_ERROR otherwise (including NULL service/state). */
VCHIQ_STATUS_T
vchiq_check_service(VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state;
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		/* NOTE(review): the counts/state in this message are read
		 * after dropping the lock, so they are a best-effort
		 * diagnostic snapshot, not guaranteed consistent. */
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d, videocore suspend state %s", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
2857
2858 /* stub functions */
/* Stub: this platform takes no action on a remote "use active" message. */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	(void)state;
}
2863
/* Connection-state callback: on the first transition to CONNECTED, spawn
 * the keepalive thread that services remote use/release requests.  The
 * first_connect flag (guarded by susp_res_lock) ensures the thread is
 * created exactly once. */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			/* NOTE(review): 10 bytes holds "VCHIQka-" plus at
			 * most a single-digit id; snprintf truncates safely
			 * for larger ids — confirm state->id stays small. */
			char threadname[10];

			arm_state->first_connect = 1;
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
				state->id);
			arm_state->ka_thread = kthread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (IS_ERR(arm_state->ka_thread)) {
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}
2893
2894 static int vchiq_probe(struct platform_device *pdev)
2895 {
2896 struct device_node *fw_node;
2897 struct rpi_firmware *fw;
2898 int err;
2899 void *ptr_err;
2900
2901 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
2902 if (!fw_node) {
2903 dev_err(&pdev->dev, "Missing firmware node\n");
2904 return -ENOENT;
2905 }
2906
2907 fw = rpi_firmware_get(fw_node);
2908 of_node_put(fw_node);
2909 if (!fw)
2910 return -EPROBE_DEFER;
2911
2912 platform_set_drvdata(pdev, fw);
2913
2914 err = vchiq_platform_init(pdev, &g_state);
2915 if (err != 0)
2916 goto failed_platform_init;
2917
2918 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2919 if (err != 0) {
2920 vchiq_log_error(vchiq_arm_log_level,
2921 "Unable to allocate device number");
2922 goto failed_platform_init;
2923 }
2924 cdev_init(&vchiq_cdev, &vchiq_fops);
2925 vchiq_cdev.owner = THIS_MODULE;
2926 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2927 if (err != 0) {
2928 vchiq_log_error(vchiq_arm_log_level,
2929 "Unable to register device");
2930 goto failed_cdev_add;
2931 }
2932
2933 /* create sysfs entries */
2934 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2935 ptr_err = vchiq_class;
2936 if (IS_ERR(ptr_err))
2937 goto failed_class_create;
2938
2939 vchiq_dev = device_create(vchiq_class, NULL,
2940 vchiq_devid, NULL, "vchiq");
2941 ptr_err = vchiq_dev;
2942 if (IS_ERR(ptr_err))
2943 goto failed_device_create;
2944
2945 /* create debugfs entries */
2946 err = vchiq_debugfs_init();
2947 if (err != 0)
2948 goto failed_debugfs_init;
2949
2950 vchiq_log_info(vchiq_arm_log_level,
2951 "vchiq: initialised - version %d (min %d), device %d.%d",
2952 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2953 MAJOR(vchiq_devid), MINOR(vchiq_devid));
2954
2955 return 0;
2956
2957 failed_debugfs_init:
2958 device_destroy(vchiq_class, vchiq_devid);
2959 failed_device_create:
2960 class_destroy(vchiq_class);
2961 failed_class_create:
2962 cdev_del(&vchiq_cdev);
2963 err = PTR_ERR(ptr_err);
2964 failed_cdev_add:
2965 unregister_chrdev_region(vchiq_devid, 1);
2966 failed_platform_init:
2967 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2968 return err;
2969 }
2970
/* Platform remove: tear down everything vchiq_probe created, in strict
 * reverse order of construction. */
static int vchiq_remove(struct platform_device *pdev)
{
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	class_destroy(vchiq_class);
	cdev_del(&vchiq_cdev);
	unregister_chrdev_region(vchiq_devid, 1);

	return 0;
}
2981
/* Device-tree match table: binds this driver to the bcm2835 vchiq node. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2987
/* Platform driver glue; module_platform_driver() generates the module
 * init/exit boilerplate that registers/unregisters it. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
module_platform_driver(vchiq_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");