/**
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#include "vchiq_killable.h"

#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define VCHIQ_MINOR 0

/* Some per-instance constants */
#define MAX_COMPLETIONS 128
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 128
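/* N.B. MAX_COMPLETIONS and MSG_QUEUE_SIZE must be powers of two: the
** insert/remove indices used below are free-running counters, and entries
** are located by masking, e.g. completions[insert & (MAX_COMPLETIONS - 1)],
** so (insert - remove) gives the queue depth even after the counters wrap. */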

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend. This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200


static void suspend_timer_callback(unsigned long context);

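/* Per-service state for a userspace client. msg_insert and msg_remove are
** free-running indices into msg_queue, protected by msg_queue_spinlock;
** insert_event and remove_event provide the blocking for DEQUEUE_MESSAGE
** and for service_callback when the message queue fills. */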
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;
	void *userdata;
	VCHIQ_INSTANCE_T instance;
	char is_vchi;
	char dequeue_pending;
	char close_pending;
	int message_available_pos;
	int msg_insert;
	int msg_remove;
	struct semaphore insert_event;
	struct semaphore remove_event;
	struct semaphore close_event;
	VCHIQ_HEADER_T *msg_queue[MSG_QUEUE_SIZE];
} USER_SERVICE_T;

struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

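/* Per-open-file state. completion_insert is advanced only by the VCHIQ
** callback thread (add_completion) and completion_remove only by the
** AWAIT_COMPLETION ioctl thread, so the completion ring itself needs no
** lock on the insert side; insert_event/remove_event are used for flow
** control instead. */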
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;
	struct semaphore remove_event;
	struct mutex completion_mutex;

	int connected;
	int closing;
	int pid;
	int mark;
	int use_close_delivered;
	int trace;

	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};

typedef struct dump_context_struct {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
} DUMP_CONTEXT_T;

static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static VCHIQ_STATE_T g_state;
static struct class *vchiq_class;
static struct device *vchiq_dev;
static DEFINE_SPINLOCK(msg_queue_spinlock);

static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		(VCHIQ_IOC_MAX + 1));

static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);

/****************************************************************************
*
* add_completion
*
***************************************************************************/
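/* Append a completion record to the instance's completion ring, blocking
** (interruptibly) on remove_event while the ring is full. The wmb() below
** orders the record contents before the updated insert index; the matching
** read barrier is in the AWAIT_COMPLETION ioctl handler. */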

static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	int insert;
	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);

		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		}

		if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
	   record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	instance->completion_insert = ++insert;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

/****************************************************************************
*
* service_callback
*
***************************************************************************/

static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service's userdata points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	int skip_completion = 0;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = 1;
		}

		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		header = NULL;
	}

	if (skip_completion) {
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		return VCHIQ_SUCCESS;
	}

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}

/****************************************************************************
*
* user_service_free
*
***************************************************************************/
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

/****************************************************************************
*
* close_delivered
*
***************************************************************************/
static void close_delivered(USER_SERVICE_T *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"close_delivered(handle=%x)",
		user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		up(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

/****************************************************************************
*
* vchiq_ioctl
*
***************************************************************************/
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	VCHIQ_INSTANCE_T instance = file->private_data;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	VCHIQ_SERVICE_T *service = NULL;
	long ret = 0;
	int i, rc;
	DEBUG_INITIALISE(g_state.local)

	vchiq_log_trace(vchiq_arm_log_level,
		"vchiq_ioctl - instance %pK, cmd %s, arg %lx",
		instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			up(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc != 0) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

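	/* CREATE_SERVICE wraps the caller's userdata in a USER_SERVICE_T and
	** substitutes service_callback, so completions can be routed back to
	** this instance; the original userdata is restored into each
	** completion record as it is delivered. */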
	case VCHIQ_IOC_CREATE_SERVICE: {
		VCHIQ_CREATE_SERVICE_T args;
		USER_SERVICE_T *user_service = NULL;
		void *userdata;
		int srvstate;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (args.is_open) {
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate =
				instance->connected ?
				VCHIQ_SRVSTATE_LISTENING :
				VCHIQ_SRVSTATE_HIDDEN;
		}

		userdata = args.params.userdata;
		args.params.callback = service_callback;
		args.params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&args.params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = (args.is_vchi != 0);
			user_service->dequeue_pending = 0;
			user_service->close_pending = 0;
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			sema_init(&user_service->insert_event, 0);
			sema_init(&user_service->remove_event, 0);
			sema_init(&user_service->close_event, 0);

			if (args.is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

			if (copy_to_user((void __user *)
				&(((VCHIQ_CREATE_SERVICE_T __user *)
					arg)->handle),
				(const void *)&service->handle,
				sizeof(service->handle)) != 0) {
				ret = -EFAULT;
				vchiq_remove_service(service->handle);
			}

			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_close_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_remove_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
					"VCHIQ_IOC_USE_SERVICE" :
					"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		VCHIQ_QUEUE_MESSAGE_T args;
		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
			if (copy_from_user(elements, args.elements,
				args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
				status = vchiq_queue_message
					(args.handle,
					elements, args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

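	/* For VCHIQ_BULK_MODE_BLOCKING a bulk_waiter is allocated here; if
	** the wait is interrupted the waiter is parked on bulk_waiter_list,
	** keyed by pid, and reclaimed by a later VCHIQ_BULK_MODE_WAITING
	** call from the same process. */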
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		VCHIQ_QUEUE_BULK_TRANSFER_T args;
		struct bulk_waiter_node *waiter = NULL;
		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
				GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}
			args.userdata = &waiter->bulk_waiter;
		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
			struct list_head *pos;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each(pos, &instance->bulk_waiter_list) {
				if (list_entry(pos, struct bulk_waiter_node,
					list)->pid == current->pid) {
					waiter = list_entry(pos,
						struct bulk_waiter_node,
						list);
					list_del(pos);
					break;
				}
			}
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d",
					current->pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				"found bulk_waiter %pK for pid %d", waiter,
				current->pid);
			args.userdata = &waiter->bulk_waiter;
		}
		status = vchiq_bulk_transfer
			(args.handle,
			VCHI_MEM_HANDLE_INVALID,
			args.data, args.size,
			args.userdata, args.mode,
			dir);
		if (!waiter)
			break;
		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
			!waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			kfree(waiter);
		} else {
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->pid;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);

			if (copy_to_user((void __user *)
				&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
					arg)->mode),
				(const void *)&mode_waiting,
				sizeof(mode_waiting)) != 0)
				ret = -EFAULT;
		}
	} break;

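	/* AWAIT_COMPLETION drains the completion ring into user buffers. The
	** rmb() below pairs with the wmb() in add_completion, and the mb()
	** before advancing completion_remove ensures the copy to user space
	** has finished before the slot can be reused. */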
	case VCHIQ_IOC_AWAIT_COMPLETION: {
		VCHIQ_AWAIT_COMPLETION_T args;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		if (copy_from_user(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		mutex_lock(&instance->completion_mutex);

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			int rc;

			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			mutex_unlock(&instance->completion_mutex);
			rc = down_interruptible(&instance->insert_event);
			mutex_lock(&instance->completion_mutex);
			if (rc != 0) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					"AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		if (ret == 0) {
			int msgbufcount = args.msgbufcount;
			int remove;

			remove = instance->completion_remove;

			for (ret = 0; ret < args.count; ret++) {
				VCHIQ_COMPLETION_DATA_T *completion;
				VCHIQ_SERVICE_T *service;
				USER_SERVICE_T *user_service;
				VCHIQ_HEADER_T *header;

				if (remove == instance->completion_insert)
					break;

				completion = &instance->completions[
					remove & (MAX_COMPLETIONS - 1)];

				/* A read memory barrier is needed to prevent
				** the prefetch of a stale completion record
				*/
				rmb();

				service = completion->service_userdata;
				user_service = service->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						sizeof(VCHIQ_HEADER_T);
					/* This must be a VCHIQ-style service */
					if (args.msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %pK: msgbufsize %x < msglen %x",
							header, args.msgbufsize,
							msglen);
						WARN(1, "invalid message "
							"size\n");
						if (ret == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
						(const void __user *)
						&args.msgbufs[msgbufcount],
						sizeof(msgbuf)) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
						msglen) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service->handle,
						header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				if ((completion->reason ==
					VCHIQ_SERVICE_CLOSED) &&
					!instance->use_close_delivered)
					unlock_service(service);

				if (copy_to_user((void __user *)(
					(size_t)args.buf +
					ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
					completion,
					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
					if (ret == 0)
						ret = -EFAULT;
					break;
				}

				/* Ensure that the above copy has completed
				** before advancing the remove pointer. */
				mb();

				instance->completion_remove = ++remove;
			}

			if (msgbufcount != args.msgbufcount) {
				if (copy_to_user((void __user *)
					&((VCHIQ_AWAIT_COMPLETION_T *)arg)->
						msgbufcount,
					&msgbufcount,
					sizeof(msgbufcount)) != 0) {
					ret = -EFAULT;
				}
			}
		}

		if (ret != 0)
			up(&instance->remove_event);
		mutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

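	/* DEQUEUE_MESSAGE is only valid for vchi services, whose messages
	** are queued on the per-service msg_queue by service_callback
	** rather than being delivered through the completion ring. */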
	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		VCHIQ_DEQUEUE_MESSAGE_T args;
		USER_SERVICE_T *user_service;
		VCHIQ_HEADER_T *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		user_service = (USER_SERVICE_T *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!args.blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (down_interruptible(
					&user_service->insert_event) != 0) {
					vchiq_log_info(vchiq_arm_log_level,
						"DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				user_service->msg_insert);

			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
			(MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		up(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= args.bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((args.buf == NULL) ||
				(copy_to_user((void __user *)args.buf,
					header->data,
					header->size) == 0)) {
				ret = header->size;
				vchiq_release_message(
					service->handle,
					header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
				"header %pK: bufsize %x < size %x",
				header, args.bufsize, header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		VCHIQ_GET_CONFIG_T args;
		VCHIQ_CONFIG_T config;

		if (copy_from_user(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}
		status = vchiq_get_config(instance, args.config_size, &config);
		if (status == VCHIQ_SUCCESS) {
			if (copy_to_user((void __user *)args.pconfig,
				&config, args.config_size) != 0) {
				ret = -EFAULT;
				break;
			}
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		VCHIQ_SET_SERVICE_OPTION_T args;

		if (copy_from_user(
			&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
			args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_DUMP_PHYS_MEM: {
		VCHIQ_DUMP_MEM_T args;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		dump_phys_mem(args.virt_addr, args.num_bytes);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] :
			"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] :
			"<invalid>",
			status, ret);

	return ret;
}

/****************************************************************************
*
* vchiq_open
*
***************************************************************************/

static int
vchiq_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
	switch (dev) {
	case VCHIQ_MINOR: {
		int ret;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_INSTANCE_T instance;

		if (!state) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
			return -ENOTCONN;
		}

		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
		if (!instance)
			return -ENOMEM;

		instance->state = state;
		instance->pid = current->tgid;

		ret = vchiq_debugfs_add_instance(instance);
		if (ret != 0) {
			kfree(instance);
			return ret;
		}

		sema_init(&instance->insert_event, 0);
		sema_init(&instance->remove_event, 0);
		mutex_init(&instance->completion_mutex);
		mutex_init(&instance->bulk_waiter_list_mutex);
		INIT_LIST_HEAD(&instance->bulk_waiter_list);

		file->private_data = instance;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		return -ENXIO;
	}

	return 0;
}

/****************************************************************************
*
* vchiq_release
*
***************************************************************************/

static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;
	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
			USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) != NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
						(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED) {
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}

/****************************************************************************
*
* vchiq_dump
*
***************************************************************************/

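/* Copy a chunk of dump text to the user buffer, honouring the read offset:
** context->offset bytes are skipped before copying begins, and
** context->actual tracks how much has been written (or a -ve errno). */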
void
vchiq_dump(void *dump_context, const char *str, int len)
{
	DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;

	if (context->actual < context->space) {
		int copy_bytes;
		if (context->offset > 0) {
			int skip_bytes = min(len, (int)context->offset);
			str += skip_bytes;
			len -= skip_bytes;
			context->offset -= skip_bytes;
			if (context->offset > 0)
				return;
		}
		copy_bytes = min(len, (int)(context->space - context->actual));
		if (copy_bytes == 0)
			return;
		if (copy_to_user(context->buf + context->actual, str,
			copy_bytes))
			context->actual = -EFAULT;
		context->actual += copy_bytes;
		len -= copy_bytes;

		/* If the terminating NUL is included in the length, then it
		** marks the end of a line and should be replaced with a
		** newline character. */
		if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
			char cr = '\n';
			if (copy_to_user(context->buf + context->actual - 1,
				&cr, 1))
				context->actual = -EFAULT;
		}
	}
}

/****************************************************************************
*
* vchiq_dump_platform_instance_state
*
***************************************************************************/

void
vchiq_dump_platform_instances(void *dump_context)
{
	VCHIQ_STATE_T *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/* There is no list of instances, so instead scan all services,
	   marking those that have been dumped. */

	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance)
				instance->mark = 0;
		}
	}

	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance && !instance->mark) {
				len = snprintf(buf, sizeof(buf),
					"Instance %pK: pid %d,%s completions %d/%d",
					instance, instance->pid,
					instance->connected ? " connected, " :
					"",
					instance->completion_insert -
					instance->completion_remove,
					MAX_COMPLETIONS);

				vchiq_dump(dump_context, buf, len + 1);

				instance->mark = 1;
			}
		}
	}
}

/****************************************************************************
*
* vchiq_dump_platform_service_state
*
***************************************************************************/

void
vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
{
	USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) &&
		user_service->is_vchi) {
		len += snprintf(buf + len, sizeof(buf) - len,
			", %d/%d messages",
			user_service->msg_insert - user_service->msg_remove,
			MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += snprintf(buf + len, sizeof(buf) - len,
				" (dequeue pending)");
	}

	vchiq_dump(dump_context, buf, len + 1);
}

/****************************************************************************
*
* dump_phys_mem
*
***************************************************************************/

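/* Debug helper: pin the user pages covering [virt_addr, virt_addr +
** num_bytes) with get_user_pages(), kmap() each page in turn and hex-dump
** it in 16-byte lines when trace logging is enabled. */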
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes)
{
	int rc;
	uint8_t *end_virt_addr = virt_addr + num_bytes;
	int num_pages;
	int offset;
	int end_offset;
	int page_idx;
	int prev_idx;
	struct page *page;
	struct page **pages;
	uint8_t *kmapped_virt_ptr;

	/* Align virt_addr and end_virt_addr to 16 byte boundaries. */

	virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
	end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
		~0x0fuL);

	offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
	end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);

	num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (pages == NULL) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to allocate memory for %d pages\n",
			num_pages);
		return;
	}

	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(
		(unsigned long)virt_addr, /* start */
		num_pages,                /* len */
		0,                        /* gup_flags */
		pages,                    /* pages (array of page pointers) */
		NULL);                    /* vmas */
	up_read(&current->mm->mmap_sem);

	prev_idx = -1;
	page = NULL;

	if (rc < 0) {
		vchiq_log_error(vchiq_arm_log_level,
			"Failed to get user pages: %d\n", rc);
		goto out;
	}

	while (offset < end_offset) {
		int page_offset = offset % PAGE_SIZE;
		page_idx = offset / PAGE_SIZE;

		if (page_idx != prev_idx) {
			if (page != NULL)
				kunmap(page);
			page = pages[page_idx];
			kmapped_virt_ptr = kmap(page);

			prev_idx = page_idx;
		}

		if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
			vchiq_log_dump_mem("ph",
				(uint32_t)(unsigned long)&kmapped_virt_ptr[
					page_offset],
				&kmapped_virt_ptr[page_offset], 16);

		offset += 16;
	}

out:
	if (page != NULL)
		kunmap(page);

	for (page_idx = 0; page_idx < num_pages; page_idx++)
		put_page(pages[page_idx]);

	kfree(pages);
}

/****************************************************************************
*
* vchiq_read
*
***************************************************************************/

static ssize_t
vchiq_read(struct file *file, char __user *buf,
	size_t count, loff_t *ppos)
{
	DUMP_CONTEXT_T context;
	context.buf = buf;
	context.actual = 0;
	context.space = count;
	context.offset = *ppos;

	vchiq_dump_state(&context, &g_state);

	*ppos += context.actual;

	return context.actual;
}

VCHIQ_STATE_T *
vchiq_get_state(void)
{

	if (g_state.remote == NULL)
		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
	else if (g_state.remote->initialised != 1)
		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
			__func__, g_state.remote->initialised);

	return ((g_state.remote != NULL) &&
		(g_state.remote->initialised == 1)) ? &g_state : NULL;
}

static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};

/*
 * Autosuspend related functionality
 */

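/* Returns non-zero if the VideoCore is still required: always when
** autosuspend is unsupported or clients are blocked, otherwise based on
** the use count and, at zero use count, the platform override (unless
** resume is currently blocked). */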
int
vchiq_videocore_wanted(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	if (!arm_state)
		/* autosuspend not supported - always return wanted */
		return 1;
	else if (arm_state->blocked_count)
		return 1;
	else if (!arm_state->videocore_use_count)
		/* usage count zero - check for override unless we're forcing */
		if (arm_state->resume_blocked)
			return 0;
		else
			return vchiq_platform_videocore_wanted(state);
	else
		/* non-zero usage count - videocore still required */
		return 1;
}

static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}

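/* The keepalive thread owns a private VCHIQ connection with a "KEEP"
** service. Each time ka_evt fires it atomically drains the release and
** use counters (release first, so releases can never outnumber uses) and
** replays them as vchiq_use_service/vchiq_release_service calls, with
** uses processed before releases. */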
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
			!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}


VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		init_completion(&arm_state->vc_suspend_complete);

		init_completion(&arm_state->vc_resume_complete);
		/* Initialise to 'done' state. We only want to block on resume
		 * completion while videocore is suspended. */
		set_resume_state(arm_state, VC_RESUME_RESUMED);

		init_completion(&arm_state->resume_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while resume is blocked */
		complete_all(&arm_state->resume_blocker);

		init_completion(&arm_state->blocked_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while things are waiting on the resume blocker */
		complete_all(&arm_state->blocked_blocker);

		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
		arm_state->suspend_timer_running = 0;
		setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
			(unsigned long)(state));

		arm_state->first_connect = 0;
	}
	return VCHIQ_SUCCESS;
}

/*
** Functions to modify the state variables;
**	set_suspend_state
**	set_resume_state
**
** There are more state variables than we might like, so ensure they remain in
** step. Suspend and resume state are maintained separately, since most of
** these state machines can operate independently. However, there are a few
** states where state transitions in one state machine cause a reset to the
** other state machine. In addition, there are some completion events which
** need to occur on state machine reset and end-state(s), so these are also
** dealt with in these functions.
**
** In all states we set the state variable according to the input, but in some
** cases we perform additional steps outlined below;
**
** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
**	The suspend completion is completed after any suspend
**	attempt. When we reset the state machine we also reset
**	the completion. This reset occurs when videocore is
**	resumed, and also if we initiate suspend after a suspend
**	failure.
**
** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
**	suspend - ie from this point on we must try to suspend
**	before resuming can occur. We therefore also reset the
**	resume state machine to VC_RESUME_IDLE in this state.
**
** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
**	complete_all on the suspend completion to notify
**	anything waiting for suspend to happen.
**
** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
**	initiate resume, so no need to alter resume state.
**	We call complete_all on the suspend completion to notify
**	of suspend rejection.
**
** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
**	suspend completion and reset the resume state machine.
**
** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
**	resume completion is in its 'done' state whenever
**	videocore is running. Therefore, the VC_RESUME_IDLE
**	state implies that videocore is suspended.
**	Hence, any thread which needs to wait until videocore is
**	running can wait on this completion - it will only block
**	if videocore is suspended.
**
** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
**	Call complete_all on the resume completion to unblock
**	any threads waiting for resume. Also reset the suspend
**	state machine to its idle state.
**
** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
*/

void
set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_suspend_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_suspend_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_SUSPEND_FORCE_CANCELED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REJECTED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_FAILED:
		complete_all(&arm_state->vc_suspend_complete);
		arm_state->vc_resume_state = VC_RESUME_RESUMED;
		complete_all(&arm_state->vc_resume_complete);
		break;
	case VC_SUSPEND_IDLE:
		reinit_completion(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REQUESTED:
		break;
	case VC_SUSPEND_IN_PROGRESS:
		set_resume_state(arm_state, VC_RESUME_IDLE);
		break;
	case VC_SUSPEND_SUSPENDED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	default:
		BUG();
		break;
	}
}

void
set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_resume_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_resume_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_RESUME_FAILED:
		break;
	case VC_RESUME_IDLE:
		reinit_completion(&arm_state->vc_resume_complete);
		break;
	case VC_RESUME_REQUESTED:
		break;
	case VC_RESUME_IN_PROGRESS:
		break;
	case VC_RESUME_RESUMED:
		complete_all(&arm_state->vc_resume_complete);
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		break;
	default:
		BUG();
		break;
	}
}


/* should be called with the write lock held */
inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	del_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.expires = jiffies +
		msecs_to_jiffies(arm_state->suspend_timer_timeout);
	add_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer_running = 1;
}

/* should be called with the write lock held */
static inline void
stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state->suspend_timer_running) {
		del_timer(&arm_state->suspend_timer);
		arm_state->suspend_timer_running = 0;
	}
}

static inline int
need_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
		(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
		vchiq_videocore_wanted(state);
}

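/* Prevent a resume from starting: wait (bounded by FORCE_SUSPEND_TIMEOUT_MS)
** for clients blocked by a previous force suspend and for any in-flight
** resume to finish, then arm resume_blocker. Called with susp_res_lock
** write-held; the lock is dropped and retaken around each wait. */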
1895 static int
1896 block_resume(VCHIQ_ARM_STATE_T *arm_state)
1897 {
1898 int status = VCHIQ_SUCCESS;
1899 const unsigned long timeout_val =
1900 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
1901 int resume_count = 0;
1902
1903 /* Allow any threads which were blocked by the last force suspend to
1904 * complete if they haven't already. Only give this one shot; if
1905 * blocked_count is incremented after blocked_blocker is completed
1906 * (which only happens when blocked_count hits 0) then those threads
1907 * will have to wait until next time around */
1908 if (arm_state->blocked_count) {
1909 reinit_completion(&arm_state->blocked_blocker);
1910 write_unlock_bh(&arm_state->susp_res_lock);
1911 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
1912 "blocked clients", __func__);
1913 if (wait_for_completion_interruptible_timeout(
1914 &arm_state->blocked_blocker, timeout_val)
1915 <= 0) {
1916 vchiq_log_error(vchiq_susp_log_level, "%s wait for "
1917 "previously blocked clients failed" , __func__);
1918 status = VCHIQ_ERROR;
1919 write_lock_bh(&arm_state->susp_res_lock);
1920 goto out;
1921 }
1922 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
1923 "clients resumed", __func__);
1924 write_lock_bh(&arm_state->susp_res_lock);
1925 }
1926
1927 /* We need to wait for resume to complete if it's in process */
1928 while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
1929 arm_state->vc_resume_state > VC_RESUME_IDLE) {
1930 if (resume_count > 1) {
1931 status = VCHIQ_ERROR;
1932 vchiq_log_error(vchiq_susp_log_level, "%s waited too "
1933 "many times for resume" , __func__);
1934 goto out;
1935 }
1936 write_unlock_bh(&arm_state->susp_res_lock);
1937 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
1938 __func__);
1939 if (wait_for_completion_interruptible_timeout(
1940 &arm_state->vc_resume_complete, timeout_val)
1941 <= 0) {
1942 vchiq_log_error(vchiq_susp_log_level, "%s wait for "
1943 "resume failed (%s)", __func__,
1944 resume_state_names[arm_state->vc_resume_state +
1945 VC_RESUME_NUM_OFFSET]);
1946 status = VCHIQ_ERROR;
1947 write_lock_bh(&arm_state->susp_res_lock);
1948 goto out;
1949 }
1950 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
1951 write_lock_bh(&arm_state->susp_res_lock);
1952 resume_count++;
1953 }
1954 reinit_completion(&arm_state->resume_blocker);
1955 arm_state->resume_blocked = 1;
1956
1957 out:
1958 return status;
1959 }
1960
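/* Allow resume again and wake any threads waiting on resume_blocker.
* Should be called with the write lock held. */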
1961 static inline void
1962 unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
1963 {
1964 complete_all(&arm_state->resume_blocker);
1965 arm_state->resume_blocked = 0;
1966 }
1967
1968 /* Initiate suspend via slot handler. Should be called with the write lock
1969 * held */
1970 VCHIQ_STATUS_T
1971 vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
1972 {
1973 VCHIQ_STATUS_T status = VCHIQ_ERROR;
1974 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1975
1976 if (!arm_state)
1977 goto out;
1978
1979 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
1980 status = VCHIQ_SUCCESS;
1982
1983 switch (arm_state->vc_suspend_state) {
1984 case VC_SUSPEND_REQUESTED:
1985 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
1986 "requested", __func__);
1987 break;
1988 case VC_SUSPEND_IN_PROGRESS:
1989 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
1990 "progress", __func__);
1991 break;
1992
1993 default:
1994 /* We don't expect to be in other states, so log but continue
1995 * anyway */
1996 vchiq_log_error(vchiq_susp_log_level,
1997 "%s unexpected suspend state %s", __func__,
1998 suspend_state_names[arm_state->vc_suspend_state +
1999 VC_SUSPEND_NUM_OFFSET]);
2000 /* fall through */
2001 case VC_SUSPEND_REJECTED:
2002 case VC_SUSPEND_FAILED:
2003 /* Ensure any idle state actions have been run */
2004 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2005 /* fall through */
2006 case VC_SUSPEND_IDLE:
2007 vchiq_log_info(vchiq_susp_log_level,
2008 "%s: suspending", __func__);
2009 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2010 /* kick the slot handler thread to initiate suspend */
2011 request_poll(state, NULL, 0);
2012 break;
2013 }
2014
2015 out:
2016 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2017 return status;
2018 }
2019
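/* Called from the slot handler following the poll requested by
* vchiq_arm_vcsuspend; if a suspend is still wanted and the videocore is
* currently resumed, move to VC_SUSPEND_IN_PROGRESS and perform the
* platform suspend. */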
2020 void
2021 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2022 {
2023 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2024 int susp = 0;
2025
2026 if (!arm_state)
2027 goto out;
2028
2029 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2030
2031 write_lock_bh(&arm_state->susp_res_lock);
2032 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2033 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2034 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2035 susp = 1;
2036 }
2037 write_unlock_bh(&arm_state->susp_res_lock);
2038
2039 if (susp)
2040 vchiq_platform_suspend(state);
2041
2042 out:
2043 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2044 return;
2045 }
2047
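/* Log a diagnostic for a force-suspend timeout: reports either that the
* videocore use count is zero, or names the first service still holding
* a use count. */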
2048 static void
2049 output_timeout_error(VCHIQ_STATE_T *state)
2050 {
2051 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2052 char err[50] = "";
2053 int vc_use_count = arm_state->videocore_use_count;
2054 int active_services = state->unused_service;
2055 int i;
2056
2057 if (!arm_state->videocore_use_count) {
2058 snprintf(err, sizeof(err), " Videocore usecount is 0");
2059 goto output_msg;
2060 }
2061 for (i = 0; i < active_services; i++) {
2062 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2063 if (service_ptr && service_ptr->service_use_count &&
2064 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2065 snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2066 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2067 service_ptr->base.fourcc),
2068 service_ptr->client_id,
2069 service_ptr->service_use_count,
2070 service_ptr->service_use_count ==
2071 vc_use_count ? "" : " (+ more)");
2072 break;
2073 }
2074 }
2075
2076 output_msg:
2077 vchiq_log_error(vchiq_susp_log_level,
2078 "timed out waiting for vc suspend (%d).%s",
2079 arm_state->autosuspend_override, err);
2080
2081 }
2082
2083 /* Try to get videocore into suspended state, regardless of autosuspend state.
2084 ** We don't actually force suspend, since videocore may get into a bad state
2085 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2086 ** determine a good point to suspend. If this doesn't happen within the
2087 ** force suspend timeout (FORCE_SUSPEND_TIMEOUT_MS) we report failure.
2088 **
2089 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2090 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2091 */
2092 VCHIQ_STATUS_T
2093 vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
2094 {
2095 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2096 VCHIQ_STATUS_T status = VCHIQ_ERROR;
2097 long rc = 0;
2098 int repeat = -1;
2099
2100 if (!arm_state)
2101 goto out;
2102
2103 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2104
2105 write_lock_bh(&arm_state->susp_res_lock);
2106
2107 status = block_resume(arm_state);
2108 if (status != VCHIQ_SUCCESS)
2109 goto unlock;
2110 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2111 /* Already suspended - just block resume and exit */
2112 vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
2113 __func__);
2114 status = VCHIQ_SUCCESS;
2115 goto unlock;
2116 } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
2117 /* initiate suspend immediately in the case that we're waiting
2118 * for the timeout */
2119 stop_suspend_timer(arm_state);
2120 if (!vchiq_videocore_wanted(state)) {
2121 vchiq_log_info(vchiq_susp_log_level, "%s videocore "
2122 "idle, initiating suspend", __func__);
2123 status = vchiq_arm_vcsuspend(state);
2124 } else if (arm_state->autosuspend_override <
2125 FORCE_SUSPEND_FAIL_MAX) {
2126 vchiq_log_info(vchiq_susp_log_level, "%s letting "
2127 "videocore go idle", __func__);
2128 status = VCHIQ_SUCCESS;
2129 } else {
2130 vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
2131 "many times - attempting suspend", __func__);
2132 status = vchiq_arm_vcsuspend(state);
2133 }
2134 } else {
2135 vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
2136 "in progress - wait for completion", __func__);
2137 status = VCHIQ_SUCCESS;
2138 }
2139
2140 /* Wait for suspend to happen due to system idle (not forced) */
2141 if (status != VCHIQ_SUCCESS)
2142 goto unblock_resume;
2143
2144 do {
2145 write_unlock_bh(&arm_state->susp_res_lock);
2146
2147 rc = wait_for_completion_interruptible_timeout(
2148 &arm_state->vc_suspend_complete,
2149 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
2150
2151 write_lock_bh(&arm_state->susp_res_lock);
2152 if (rc < 0) {
2153 vchiq_log_warning(vchiq_susp_log_level, "%s "
2154 "interrupted waiting for suspend", __func__);
2155 status = VCHIQ_ERROR;
2156 goto unblock_resume;
2157 } else if (rc == 0) {
2158 if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
2159 /* Repeat timeout once if in progress */
2160 if (repeat < 0) {
2161 repeat = 1;
2162 continue;
2163 }
2164 }
2165 arm_state->autosuspend_override++;
2166 output_timeout_error(state);
2167
2168 status = VCHIQ_RETRY;
2169 goto unblock_resume;
2170 }
2171 } while (repeat-- > 0);
2172
2173 /* Check and report state in case we need to abort ARM suspend */
2174 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
2175 status = VCHIQ_RETRY;
2176 vchiq_log_error(vchiq_susp_log_level,
2177 "%s videocore suspend failed (state %s)", __func__,
2178 suspend_state_names[arm_state->vc_suspend_state +
2179 VC_SUSPEND_NUM_OFFSET]);
2180 /* Reset the state only if it's still in an error state.
2181 * Something could have already initiated another suspend. */
2182 if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
2183 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2184
2185 goto unblock_resume;
2186 }
2187
2188 /* successfully suspended - unlock and exit */
2189 goto unlock;
2190
2191 unblock_resume:
2192 /* all error states need to unblock resume before exit */
2193 unblock_resume(arm_state);
2194
2195 unlock:
2196 write_unlock_bh(&arm_state->susp_res_lock);
2197
2198 out:
2199 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2200 return status;
2201 }
2202
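/* Initiate suspend if nothing wants the videocore and a connection has
* been made; called when the suspend timer expires. */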
2203 void
2204 vchiq_check_suspend(VCHIQ_STATE_T *state)
2205 {
2206 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2207
2208 if (!arm_state)
2209 goto out;
2210
2211 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2212
2213 write_lock_bh(&arm_state->susp_res_lock);
2214 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2215 arm_state->first_connect &&
2216 !vchiq_videocore_wanted(state)) {
2217 vchiq_arm_vcsuspend(state);
2218 }
2219 write_unlock_bh(&arm_state->susp_res_lock);
2220
2221 out:
2222 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2223 return;
2224 }
2226
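/* Undo a force suspend: unblock resume and, if a resume is now needed,
* wait for it to complete. Returns 0 if the videocore ends up resumed,
* -1 if it remains suspended or the wait is interrupted. */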
2227 int
2228 vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
2229 {
2230 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2231 int resume = 0;
2232 int ret = -1;
2233
2234 if (!arm_state)
2235 goto out;
2236
2237 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2238
2239 write_lock_bh(&arm_state->susp_res_lock);
2240 unblock_resume(arm_state);
2241 resume = vchiq_check_resume(state);
2242 write_unlock_bh(&arm_state->susp_res_lock);
2243
2244 if (resume) {
2245 if (wait_for_completion_interruptible(
2246 &arm_state->vc_resume_complete) < 0) {
2247 vchiq_log_error(vchiq_susp_log_level,
2248 "%s interrupted", __func__);
2249 /* failed, cannot accurately derive suspend
2250 * state, so exit early. */
2251 goto out;
2252 }
2253 }
2254
2255 read_lock_bh(&arm_state->susp_res_lock);
2256 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2257 vchiq_log_info(vchiq_susp_log_level,
2258 "%s: Videocore remains suspended", __func__);
2259 } else {
2260 vchiq_log_info(vchiq_susp_log_level,
2261 "%s: Videocore resumed", __func__);
2262 ret = 0;
2263 }
2264 read_unlock_bh(&arm_state->susp_res_lock);
2265 out:
2266 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2267 return ret;
2268 }
2269
2270 /* This function should be called with the write lock held */
2271 int
2272 vchiq_check_resume(VCHIQ_STATE_T *state)
2273 {
2274 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2275 int resume = 0;
2276
2277 if (!arm_state)
2278 goto out;
2279
2280 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2281
2282 if (need_resume(state)) {
2283 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2284 request_poll(state, NULL, 0);
2285 resume = 1;
2286 }
2287
2288 out:
2289 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2290 return resume;
2291 }
2292
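/* Move a requested resume to VC_RESUME_IN_PROGRESS and trigger the
* platform resume, unless the videocore is already awake or a resume is
* already underway. */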
2293 void
2294 vchiq_platform_check_resume(VCHIQ_STATE_T *state)
2295 {
2296 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2297 int res = 0;
2298
2299 if (!arm_state)
2300 goto out;
2301
2302 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2303
2304 write_lock_bh(&arm_state->susp_res_lock);
2305 if (arm_state->wake_address == 0) {
2306 vchiq_log_info(vchiq_susp_log_level,
2307 "%s: already awake", __func__);
2308 goto unlock;
2309 }
2310 if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
2311 vchiq_log_info(vchiq_susp_log_level,
2312 "%s: already resuming", __func__);
2313 goto unlock;
2314 }
2315
2316 if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
2317 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
2318 res = 1;
2319 } else
2320 vchiq_log_trace(vchiq_susp_log_level,
2321 "%s: not resuming (resume state %s)", __func__,
2322 resume_state_names[arm_state->vc_resume_state +
2323 VC_RESUME_NUM_OFFSET]);
2324
2325 unlock:
2326 write_unlock_bh(&arm_state->susp_res_lock);
2327
2328 if (res)
2329 vchiq_platform_resume(state);
2330
2331 out:
2332 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2333 return;
2335 }
2338
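/* Take a use count for a service (or for the VCHIQ core itself when
* use_type is USE_TYPE_VCHIQ), waking the videocore if necessary. If a
* force suspend is blocking resume, the caller is blocked until resume
* is unblocked. Queued keepalive use notifications are forwarded to the
* videocore before returning. */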
2339 VCHIQ_STATUS_T
2340 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2341 enum USE_TYPE_E use_type)
2342 {
2343 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2344 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2345 char entity[16];
2346 int *entity_uc;
2347 int local_uc, local_entity_uc;
2348
2349 if (!arm_state)
2350 goto out;
2351
2352 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2353
2354 if (use_type == USE_TYPE_VCHIQ) {
2355 sprintf(entity, "VCHIQ: ");
2356 entity_uc = &arm_state->peer_use_count;
2357 } else if (service) {
2358 sprintf(entity, "%c%c%c%c:%03d",
2359 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2360 service->client_id);
2361 entity_uc = &service->service_use_count;
2362 } else {
2363 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2364 "ptr", __func__);
2365 ret = VCHIQ_ERROR;
2366 goto out;
2367 }
2368
2369 write_lock_bh(&arm_state->susp_res_lock);
2370 while (arm_state->resume_blocked) {
2371 /* If we call 'use' while force suspend is waiting for suspend,
2372 * then we're about to block the thread which the force is
2373 * waiting to complete, so we're bound to just time out. In this
2374 * case, set the suspend state such that the wait will be
2375 * canceled, so we can complete as quickly as possible. */
2376 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2377 VC_SUSPEND_IDLE) {
2378 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2379 break;
2380 }
2381 /* If suspend is already in progress then we need to block */
2382 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2383 /* Indicate that there are threads waiting on the resume
2384 * blocker. These need to be allowed to complete before
2385 * a _second_ call to force suspend can complete,
2386 * otherwise low priority threads might never actually
2387 * continue */
2388 arm_state->blocked_count++;
2389 write_unlock_bh(&arm_state->susp_res_lock);
2390 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2391 "blocked - waiting...", __func__, entity);
2392 if (wait_for_completion_killable(
2393 &arm_state->resume_blocker) != 0) {
2394 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2395 "wait for resume blocker interrupted",
2396 __func__, entity);
2397 ret = VCHIQ_ERROR;
2398 write_lock_bh(&arm_state->susp_res_lock);
2399 arm_state->blocked_count--;
2400 write_unlock_bh(&arm_state->susp_res_lock);
2401 goto out;
2402 }
2403 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2404 "unblocked", __func__, entity);
2405 write_lock_bh(&arm_state->susp_res_lock);
2406 if (--arm_state->blocked_count == 0)
2407 complete_all(&arm_state->blocked_blocker);
2408 }
2409 }
2410
2411 stop_suspend_timer(arm_state);
2412
2413 local_uc = ++arm_state->videocore_use_count;
2414 local_entity_uc = ++(*entity_uc);
2415
2416 /* If there's a pending request which hasn't yet been serviced then
2417 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2418 * vc_resume_complete will block until we either resume or fail to
2419 * suspend */
2420 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2421 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2422
2423 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2424 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2425 vchiq_log_info(vchiq_susp_log_level,
2426 "%s %s count %d, state count %d",
2427 __func__, entity, local_entity_uc, local_uc);
2428 request_poll(state, NULL, 0);
2429 } else
2430 vchiq_log_trace(vchiq_susp_log_level,
2431 "%s %s count %d, state count %d",
2432 __func__, entity, *entity_uc, local_uc);
2434
2435 write_unlock_bh(&arm_state->susp_res_lock);
2436
2437 /* Completion is in a done state when we're not suspended, so this won't
2438 * block for the non-suspended case. */
2439 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2440 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2441 __func__, entity);
2442 if (wait_for_completion_killable(
2443 &arm_state->vc_resume_complete) != 0) {
2444 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2445 "resume interrupted", __func__, entity);
2446 ret = VCHIQ_ERROR;
2447 goto out;
2448 }
2449 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2450 entity);
2451 }
2452
2453 if (ret == VCHIQ_SUCCESS) {
2454 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2455 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2456 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2457 /* Send the use notify to videocore */
2458 status = vchiq_send_remote_use_active(state);
2459 if (status == VCHIQ_SUCCESS)
2460 ack_cnt--;
2461 else
2462 atomic_add(ack_cnt,
2463 &arm_state->ka_use_ack_count);
2464 }
2465 }
2466
2467 out:
2468 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2469 return ret;
2470 }
2471
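/* Drop a use count taken by vchiq_use_internal. If nothing wants the
* videocore any more, either arm the suspend timer or (when the timer is
* unused or resume is blocked) initiate suspend immediately. */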
2472 VCHIQ_STATUS_T
2473 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2474 {
2475 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2476 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2477 char entity[16];
2478 int *entity_uc;
2479 int local_uc, local_entity_uc;
2480
2481 if (!arm_state)
2482 goto out;
2483
2484 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2485
2486 if (service) {
2487 sprintf(entity, "%c%c%c%c:%03d",
2488 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2489 service->client_id);
2490 entity_uc = &service->service_use_count;
2491 } else {
2492 sprintf(entity, "PEER: ");
2493 entity_uc = &arm_state->peer_use_count;
2494 }
2495
2496 write_lock_bh(&arm_state->susp_res_lock);
2497 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2498 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2499 WARN_ON(!arm_state->videocore_use_count);
2500 WARN_ON(!(*entity_uc));
2501 ret = VCHIQ_ERROR;
2502 goto unlock;
2503 }
2504 local_uc = --arm_state->videocore_use_count;
2505 local_entity_uc = --(*entity_uc);
2506
2507 if (!vchiq_videocore_wanted(state)) {
2508 if (vchiq_platform_use_suspend_timer() &&
2509 !arm_state->resume_blocked) {
2510 /* Only use the timer if we're not trying to force
2511 * suspend (=> resume_blocked) */
2512 start_suspend_timer(arm_state);
2513 } else {
2514 vchiq_log_info(vchiq_susp_log_level,
2515 "%s %s count %d, state count %d - suspending",
2516 __func__, entity, *entity_uc,
2517 arm_state->videocore_use_count);
2518 vchiq_arm_vcsuspend(state);
2519 }
2520 } else
2521 vchiq_log_trace(vchiq_susp_log_level,
2522 "%s %s count %d, state count %d",
2523 __func__, entity, *entity_uc,
2524 arm_state->videocore_use_count);
2525
2526 unlock:
2527 write_unlock_bh(&arm_state->susp_res_lock);
2528
2529 out:
2530 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2531 return ret;
2532 }
2533
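/* Use/release requests from the videocore are counted here and handed
* to the keepalive thread via ka_evt, which acts on them on the
* remote's behalf. */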
2534 void
2535 vchiq_on_remote_use(VCHIQ_STATE_T *state)
2536 {
2537 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2538 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2539 atomic_inc(&arm_state->ka_use_count);
2540 complete(&arm_state->ka_evt);
2541 }
2542
2543 void
2544 vchiq_on_remote_release(VCHIQ_STATE_T *state)
2545 {
2546 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2547 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2548 atomic_inc(&arm_state->ka_release_count);
2549 complete(&arm_state->ka_evt);
2550 }
2551
2552 VCHIQ_STATUS_T
2553 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
2554 {
2555 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2556 }
2557
2558 VCHIQ_STATUS_T
2559 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
2560 {
2561 return vchiq_release_internal(service->state, service);
2562 }
2563
2564 VCHIQ_DEBUGFS_NODE_T *
2565 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
2566 {
2567 return &instance->debugfs_node;
2568 }
2569
2570 int
2571 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2572 {
2573 VCHIQ_SERVICE_T *service;
2574 int use_count = 0, i;
2575 i = 0;
2576 while ((service = next_service_by_instance(instance->state,
2577 instance, &i)) != NULL) {
2578 use_count += service->service_use_count;
2579 unlock_service(service);
2580 }
2581 return use_count;
2582 }
2583
2584 int
2585 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
2586 {
2587 return instance->pid;
2588 }
2589
2590 int
2591 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
2592 {
2593 return instance->trace;
2594 }
2595
2596 void
2597 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2598 {
2599 VCHIQ_SERVICE_T *service;
2600 int i;
2601 i = 0;
2602 while ((service = next_service_by_instance(instance->state,
2603 instance, &i)) != NULL) {
2604 service->trace = trace;
2605 unlock_service(service);
2606 }
2607 instance->trace = (trace != 0);
2608 }
2609
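/* Armed by start_suspend_timer; when it fires, re-check whether the
* videocore can now be suspended. */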
2610 static void suspend_timer_callback(unsigned long context)
2611 {
2612 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2613 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2614 if (!arm_state)
2615 goto out;
2616 vchiq_log_info(vchiq_susp_log_level,
2617 "%s - suspend timer expired - check suspend", __func__);
2618 vchiq_check_suspend(state);
2619 out:
2620 return;
2621 }
2622
2623 VCHIQ_STATUS_T
2624 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2625 {
2626 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2627 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2628 if (service) {
2629 ret = vchiq_use_internal(service->state, service,
2630 USE_TYPE_SERVICE_NO_RESUME);
2631 unlock_service(service);
2632 }
2633 return ret;
2634 }
2635
2636 VCHIQ_STATUS_T
2637 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2638 {
2639 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2640 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2641 if (service) {
2642 ret = vchiq_use_internal(service->state, service,
2643 USE_TYPE_SERVICE);
2644 unlock_service(service);
2645 }
2646 return ret;
2647 }
2648
2649 VCHIQ_STATUS_T
2650 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2651 {
2652 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2653 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2654 if (service) {
2655 ret = vchiq_release_internal(service->state, service);
2656 unlock_service(service);
2657 }
2658 return ret;
2659 }
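/* Minimal usage sketch (illustrative only - "handle" stands for a
* hypothetical VCHIQ_SERVICE_HANDLE_T obtained when the service was
* opened):
*
* if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
* ...exchange messages while the videocore is held awake...
* vchiq_release_service(handle);
* }
*
* Every successful use must be balanced by a release, otherwise the
* videocore use count never reaches zero and suspend is blocked. */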
2660
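/* Dump the suspend/resume state and per-service use counts to the log,
* flagging any service whose use count is currently preventing
* suspend. */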
2661 void
2662 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2663 {
2664 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2665 int i, j = 0;
2666 /* Only dump 64 services */
2667 static const int local_max_services = 64;
2668 /* If there's more than 64 services, only dump ones with
2669 * non-zero counts */
2670 int only_nonzero = 0;
2671 static const char *nz = "<-- preventing suspend";
2672
2673 enum vc_suspend_status vc_suspend_state;
2674 enum vc_resume_status vc_resume_state;
2675 int peer_count;
2676 int vc_use_count;
2677 int active_services;
2678 struct service_data_struct {
2679 int fourcc;
2680 int clientid;
2681 int use_count;
2682 } service_data[local_max_services];
2683
2684 if (!arm_state)
2685 return;
2686
2687 read_lock_bh(&arm_state->susp_res_lock);
2688 vc_suspend_state = arm_state->vc_suspend_state;
2689 vc_resume_state = arm_state->vc_resume_state;
2690 peer_count = arm_state->peer_use_count;
2691 vc_use_count = arm_state->videocore_use_count;
2692 active_services = state->unused_service;
2693 if (active_services > local_max_services)
2694 only_nonzero = 1;
2695
2696 for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2697 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2698 if (!service_ptr)
2699 continue;
2700
2701 if (only_nonzero && !service_ptr->service_use_count)
2702 continue;
2703
2704 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2705 service_data[j].fourcc = service_ptr->base.fourcc;
2706 service_data[j].clientid = service_ptr->client_id;
2707 service_data[j++].use_count = service_ptr->
2708 service_use_count;
2709 }
2710 }
2711
2712 read_unlock_bh(&arm_state->susp_res_lock);
2713
2714 vchiq_log_warning(vchiq_susp_log_level,
2715 "-- Videcore suspend state: %s --",
2716 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2717 vchiq_log_warning(vchiq_susp_log_level,
2718 "-- Videcore resume state: %s --",
2719 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2720
2721 if (only_nonzero)
2722 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2723 "services (%d). Only dumping up to first %d services "
2724 "with non-zero use-count", active_services,
2725 local_max_services);
2726
2727 for (i = 0; i < j; i++) {
2728 vchiq_log_warning(vchiq_susp_log_level,
2729 "----- %c%c%c%c:%d service count %d %s",
2730 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2731 service_data[i].clientid,
2732 service_data[i].use_count,
2733 service_data[i].use_count ? nz : "");
2734 }
2735 vchiq_log_warning(vchiq_susp_log_level,
2736 "----- VCHIQ use count count %d", peer_count);
2737 vchiq_log_warning(vchiq_susp_log_level,
2738 "--- Overall vchiq instance use count %d", vc_use_count);
2739
2740 vchiq_dump_platform_use_state(state);
2741 }
2742
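/* Check that the caller holds a use count on the service before using
* it; on failure, log the counts and dump the overall use state. */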
2743 VCHIQ_STATUS_T
2744 vchiq_check_service(VCHIQ_SERVICE_T *service)
2745 {
2746 VCHIQ_ARM_STATE_T *arm_state;
2747 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2748
2749 if (!service || !service->state)
2750 goto out;
2751
2752 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2753
2754 arm_state = vchiq_platform_get_arm_state(service->state);
2755
2756 read_lock_bh(&arm_state->susp_res_lock);
2757 if (service->service_use_count)
2758 ret = VCHIQ_SUCCESS;
2759 read_unlock_bh(&arm_state->susp_res_lock);
2760
2761 if (ret == VCHIQ_ERROR) {
2762 vchiq_log_error(vchiq_susp_log_level,
2763 "%s ERROR - %c%c%c%c:%d service count %d, "
2764 "state count %d, videocore suspend state %s", __func__,
2765 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2766 service->client_id, service->service_use_count,
2767 arm_state->videocore_use_count,
2768 suspend_state_names[arm_state->vc_suspend_state +
2769 VC_SUSPEND_NUM_OFFSET]);
2770 vchiq_dump_service_use_state(service->state);
2771 }
2772 out:
2773 return ret;
2774 }
2775
2776 /* stub functions */
2777 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
2778 {
2779 (void)state;
2780 }
2781
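/* Connection-state callback; on the first transition to CONNECTED,
* spawn the keepalive thread that services remote use/release
* requests. */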
2782 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
2783 VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
2784 {
2785 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2786 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2787 get_conn_state_name(oldstate), get_conn_state_name(newstate));
2788 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
2789 write_lock_bh(&arm_state->susp_res_lock);
2790 if (!arm_state->first_connect) {
2791 char threadname[10];
2792 arm_state->first_connect = 1;
2793 write_unlock_bh(&arm_state->susp_res_lock);
2794 snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
2795 state->id);
2796 arm_state->ka_thread = kthread_create(
2797 &vchiq_keepalive_thread_func,
2798 (void *)state,
2799 threadname);
2800 if (IS_ERR(arm_state->ka_thread)) {
2801 vchiq_log_error(vchiq_susp_log_level,
2802 "vchiq: FATAL: couldn't create thread %s",
2803 threadname);
2804 } else {
2805 wake_up_process(arm_state->ka_thread);
2806 }
2807 } else
2808 write_unlock_bh(&arm_state->susp_res_lock);
2809 }
2810 }
2811
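/* Platform probe: resolve the firmware node, initialise the platform
* state, then register the character device, sysfs class/device and
* debugfs entries. */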
2812 static int vchiq_probe(struct platform_device *pdev)
2813 {
2814 struct device_node *fw_node;
2815 struct rpi_firmware *fw;
2816 int err;
2818
2819 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
2820 if (!fw_node) {
2821 dev_err(&pdev->dev, "Missing firmware node\n");
2822 return -ENOENT;
2823 }
2824
2825 fw = rpi_firmware_get(fw_node);
2826 of_node_put(fw_node);
2827 if (!fw)
2828 return -EPROBE_DEFER;
2829
2830 platform_set_drvdata(pdev, fw);
2831
2832 err = vchiq_platform_init(pdev, &g_state);
2833 if (err != 0)
2834 goto failed_platform_init;
2835
2836 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2837 if (err != 0) {
2838 vchiq_log_error(vchiq_arm_log_level,
2839 "Unable to allocate device number");
2840 goto failed_platform_init;
2841 }
2842 cdev_init(&vchiq_cdev, &vchiq_fops);
2843 vchiq_cdev.owner = THIS_MODULE;
2844 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2845 if (err != 0) {
2846 vchiq_log_error(vchiq_arm_log_level,
2847 "Unable to register device");
2848 goto failed_cdev_add;
2849 }
2850
2851 /* create sysfs entries */
2852 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2853 if (IS_ERR(vchiq_class)) {
2854 err = PTR_ERR(vchiq_class);
2855 goto failed_class_create;
2856 }
2857 vchiq_dev = device_create(vchiq_class, NULL,
2858 vchiq_devid, NULL, "vchiq");
2859 if (IS_ERR(vchiq_dev)) {
2860 err = PTR_ERR(vchiq_dev);
2861 goto failed_device_create;
2862 }
2863 /* create debugfs entries */
2864 err = vchiq_debugfs_init();
2865 if (err != 0)
2866 goto failed_debugfs_init;
2867
2868 vchiq_log_info(vchiq_arm_log_level,
2869 "vchiq: initialised - version %d (min %d), device %d.%d",
2870 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2871 MAJOR(vchiq_devid), MINOR(vchiq_devid));
2872
2873 return 0;
2874
2875 failed_debugfs_init:
2876 device_destroy(vchiq_class, vchiq_devid);
2877 failed_device_create:
2878 class_destroy(vchiq_class);
2879 failed_class_create:
2880 cdev_del(&vchiq_cdev);
2882 failed_cdev_add:
2883 unregister_chrdev_region(vchiq_devid, 1);
2884 failed_platform_init:
2885 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2886 return err;
2887 }
2888
2889 static int vchiq_remove(struct platform_device *pdev)
2890 {
2891 vchiq_debugfs_deinit();
2892 device_destroy(vchiq_class, vchiq_devid);
2893 class_destroy(vchiq_class);
2894 cdev_del(&vchiq_cdev);
2895 unregister_chrdev_region(vchiq_devid, 1);
2896
2897 return 0;
2898 }
2899
2900 static const struct of_device_id vchiq_of_match[] = {
2901 { .compatible = "brcm,bcm2835-vchiq", },
2902 {},
2903 };
2904 MODULE_DEVICE_TABLE(of, vchiq_of_match);
2905
2906 static struct platform_driver vchiq_driver = {
2907 .driver = {
2908 .name = "bcm2835_vchiq",
2909 .of_match_table = vchiq_of_match,
2910 },
2911 .probe = vchiq_probe,
2912 .remove = vchiq_remove,
2913 };
2914 module_platform_driver(vchiq_driver);
2915
2916 MODULE_LICENSE("GPL");
2917 MODULE_DESCRIPTION("Videocore VCHIQ driver");
2918 MODULE_AUTHOR("Broadcom Corporation");