]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
vchiq_arm: Access the dequeue_pending flag locked
[mirror_ubuntu-zesty-kernel.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_arm.c
CommitLineData
71bad7f0 1/**
2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The names of the above-listed copyright holders may not be used
15 * to endorse or promote products derived from this software without
16 * specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2, as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/errno.h>
39#include <linux/cdev.h>
40#include <linux/fs.h>
41#include <linux/device.h>
42#include <linux/mm.h>
43#include <linux/highmem.h>
44#include <linux/pagemap.h>
45#include <linux/bug.h>
46#include <linux/semaphore.h>
47#include <linux/list.h>
48#include <linux/of.h>
49#include <linux/platform_device.h>
50#include <soc/bcm2835/raspberrypi-firmware.h>
51
52#include "vchiq_core.h"
53#include "vchiq_ioctl.h"
54#include "vchiq_arm.h"
55#include "vchiq_debugfs.h"
56#include "vchiq_killable.h"
57
58#define DEVICE_NAME "vchiq"
59
60/* Override the default prefix, which would be vchiq_arm (from the filename) */
61#undef MODULE_PARAM_PREFIX
62#define MODULE_PARAM_PREFIX DEVICE_NAME "."
63
64#define VCHIQ_MINOR 0
65
66/* Some per-instance constants */
67#define MAX_COMPLETIONS 16
68#define MAX_SERVICES 64
69#define MAX_ELEMENTS 8
70#define MSG_QUEUE_SIZE 64
71
72#define KEEPALIVE_VER 1
73#define KEEPALIVE_VER_MIN KEEPALIVE_VER
74
75/* Run time control of log level, based on KERN_XXX level. */
76int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
77int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
78
79#define SUSPEND_TIMER_TIMEOUT_MS 100
80#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
81
82#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
/* Printable names for the VC suspend state machine.  Indexed by
 * (state + VC_SUSPEND_NUM_OFFSET) since the first three states are
 * negative values; order must match the enum declared in vchiq_arm.h
 * — NOTE(review): enum not visible in this chunk, confirm ordering. */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
92#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
/* Printable names for the VC resume state machine.  Indexed by
 * (state + VC_RESUME_NUM_OFFSET); must stay in step with the resume
 * state enum — NOTE(review): enum declared elsewhere, confirm order. */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
100/* The number of times we allow force suspend to timeout before actually
101** _forcing_ suspend. This is to cater for SW which fails to release vchiq
102** correctly - we don't want to prevent ARM suspend indefinitely in this case.
103*/
104#define FORCE_SUSPEND_FAIL_MAX 8
105
106/* The time in ms allowed for videocore to go idle when force suspend has been
107 * requested */
108#define FORCE_SUSPEND_TIMEOUT_MS 200
109
110
111static void suspend_timer_callback(unsigned long context);
112
113
/* Per-service user-side state, attached to a VCHIQ service as its
 * base.userdata.  Holds the vchi-style message ring and the flags used
 * to route completions back to the owning instance.  msg_insert /
 * msg_remove are free-running counters; slots are selected by masking
 * with (MSG_QUEUE_SIZE - 1), so MSG_QUEUE_SIZE must be a power of two. */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* underlying core service */
	void *userdata;			/* client-supplied opaque value */
	VCHIQ_INSTANCE_T instance;	/* owning instance (per open fd) */
	char is_vchi;			/* non-zero: queue messages in msg_queue */
	char dequeue_pending;		/* a DEQUEUE_MESSAGE caller is waiting;
					 * read/written under msg_queue_spinlock */
	char close_pending;		/* CLOSED queued, awaiting CLOSE_DELIVERED */
	int message_available_pos;	/* completion_insert pos of the last
					 * MESSAGE_AVAILABLE completion */
	int msg_insert;			/* free-running insert counter */
	int msg_remove;			/* free-running remove counter */
	struct semaphore insert_event;	/* signalled after a message is queued */
	struct semaphore remove_event;	/* signalled after a message is dequeued */
	struct semaphore close_event;	/* signalled by the CLOSE_DELIVERED ioctl */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE]; /* message ring buffer */
} USER_SERVICE_T;
129
/* A bulk_waiter plus the linkage needed to park it on an instance's
 * bulk_waiter_list between a VCHIQ_BULK_MODE_BLOCKING transfer that was
 * interrupted and the later VCHIQ_BULK_MODE_WAITING retry by the same
 * process. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;	/* core waiter state */
	int pid;			/* current->pid of the waiting task */
	struct list_head list;		/* entry in instance->bulk_waiter_list */
};
135
/* Per-open-file state for /dev/vchiq.  Owns the circular completion
 * queue consumed by the AWAIT_COMPLETION ioctl.  completion_insert /
 * completion_remove are free-running counters; slots are selected by
 * masking with (MAX_COMPLETIONS - 1), so MAX_COMPLETIONS must be a
 * power of two. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;		/* global VCHIQ state */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS]; /* completion ring */
	int completion_insert;		/* free-running producer counter */
	int completion_remove;		/* free-running consumer counter */
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when completions are consumed */
	struct mutex completion_mutex;	/* serialises AWAIT_COMPLETION consumers */

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* SHUTDOWN in progress; callbacks bail out */
	int pid;			/* tgid of the opening process */
	int mark;			/* NOTE(review): used by code outside this
					 * chunk (suspend/resume marking?) — verify */
	int use_close_delivered;	/* client lib understands CLOSE_DELIVERED */
	int trace;			/* per-instance trace enable flag */

	struct list_head bulk_waiter_list;	/* parked bulk waiters */
	struct mutex bulk_waiter_list_mutex;	/* protects bulk_waiter_list */

	VCHIQ_DEBUGFS_NODE_T debugfs_node;	/* debugfs registration handle */
};
157
/* Cursor state for dumping driver status into a user buffer.
 * NOTE(review): the dump routines that consume this live outside this
 * chunk — field roles below are inferred from names, confirm there. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* presumably bytes produced so far */
	size_t space;		/* presumably space remaining in buf */
	loff_t offset;		/* file offset the dump starts from */
} DUMP_CONTEXT_T;
164
165static struct cdev vchiq_cdev;
166static dev_t vchiq_devid;
167static VCHIQ_STATE_T g_state;
168static struct class *vchiq_class;
169static struct device *vchiq_dev;
170static DEFINE_SPINLOCK(msg_queue_spinlock);
171
/* Human-readable ioctl names for log messages, indexed by _IOC_NR(cmd).
 * Order must track the ioctl definitions; the static assert below pins
 * the table length to VCHIQ_IOC_MAX + 1 so a new ioctl cannot be added
 * without extending this table. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
	(VCHIQ_IOC_MAX + 1));
71bad7f0 195
196static void
197dump_phys_mem(void *virt_addr, uint32_t num_bytes);
198
199/****************************************************************************
200*
201* add_completion
202*
203***************************************************************************/
204
/* Append one completion record to the instance's circular completion
 * queue, blocking interruptibly while the queue is full.  Runs in the
 * service callback context.  Returns VCHIQ_RETRY if the wait was
 * interrupted by a signal, VCHIQ_ERROR if the instance is shutting
 * down, VCHIQ_SUCCESS otherwise. */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	DEBUG_INITIALISE(g_state.local)

	/* The counters are free-running, so the queue is full when they
	 * are exactly MAX_COMPLETIONS apart. */
	while (instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS)) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_ERROR;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two: mask the free-running index
	 * down to a slot number. */
	completion =
		&instance->completions[instance->completion_insert &
		(MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	/* Remember where the latest MESSAGE_AVAILABLE sits so the callback
	 * can tell whether one is still pending in the queue. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos =
			instance->completion_insert;
	instance->completion_insert++;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
263
264/****************************************************************************
265*
266* service_callback
267*
268***************************************************************************/
269
/* Callback invoked by the VCHIQ core for every service event.  For
 * vchi-style services, message headers are queued in the per-service
 * msg_queue ring (protected by msg_queue_spinlock) instead of being
 * passed through the completion queue; everything else becomes a
 * completion record via add_completion(). */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	int skip_completion = 0;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Instance shutting down (or gone) - drop the event silently. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Free-running counters: the ring is full when they are
		 * MSG_QUEUE_SIZE apart. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			/* Wait for a DEQUEUE_MESSAGE to free a slot. */
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			/* dequeue_pending is tested and cleared while still
			 * holding msg_queue_spinlock, so it cannot race with
			 * the DEQUEUE_MESSAGE ioctl setting it. */
			user_service->dequeue_pending = 0;
			skip_completion = 1;
		}

		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* Header consumed by the msg_queue; do not also attach it
		 * to a completion record. */
		header = NULL;
	}

	if (skip_completion) {
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		return VCHIQ_SUCCESS;
	}

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
378
379/****************************************************************************
380*
381* user_service_free
382*
383***************************************************************************/
/* Destructor passed to vchiq_add_service_internal; frees the
 * USER_SERVICE_T allocated in the CREATE_SERVICE ioctl when the core
 * releases the service. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
389
390/****************************************************************************
391*
392* close_delivered
393*
394***************************************************************************/
395static void close_delivered(USER_SERVICE_T *user_service)
396{
397 vchiq_log_info(vchiq_arm_log_level,
398 "close_delivered(handle=%x)",
399 user_service->service->handle);
400
401 if (user_service->close_pending) {
402 /* Allow the underlying service to be culled */
403 unlock_service(user_service->service);
404
405 /* Wake the user-thread blocked in close_ or remove_service */
406 up(&user_service->close_event);
407
408 user_service->close_pending = 0;
409 }
410}
411
412/****************************************************************************
413*
414* vchiq_ioctl
415*
416***************************************************************************/
/* The /dev/vchiq ioctl multiplexer.  Dispatches every userspace VCHIQ
 * request; `status` carries VCHIQ core results and is translated to an
 * errno at the end, while `ret` holds direct errno/payload results.
 * `service` left non-NULL at the bottom is unlocked exactly once. */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	VCHIQ_INSTANCE_T instance = file->private_data;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	VCHIQ_SERVICE_T *service = NULL;
	long ret = 0;
	int i, rc;
	DEBUG_INITIALISE(g_state.local)

	vchiq_log_trace(vchiq_arm_log_level,
		"vchiq_ioctl - instance %pK, cmd %s, arg %lx",
		instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			up(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc != 0) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		VCHIQ_CREATE_SERVICE_T args;
		USER_SERVICE_T *user_service = NULL;
		void *userdata;
		int srvstate;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (args.is_open) {
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate =
				instance->connected ?
				VCHIQ_SRVSTATE_LISTENING :
				VCHIQ_SRVSTATE_HIDDEN;
		}

		/* The core sees service_callback/user_service; the client's
		 * own userdata is kept in the USER_SERVICE_T. */
		userdata = args.params.userdata;
		args.params.callback = service_callback;
		args.params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&args.params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = (args.is_vchi != 0);
			user_service->dequeue_pending = 0;
			user_service->close_pending = 0;
			/* Start "behind" the completion queue so no stale
			 * MESSAGE_AVAILABLE appears pending. */
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			sema_init(&user_service->insert_event, 0);
			sema_init(&user_service->remove_event, 0);
			sema_init(&user_service->close_event, 0);

			if (args.is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

			if (copy_to_user((void __user *)
				&(((VCHIQ_CREATE_SERVICE_T __user *)
					arg)->handle),
				(const void *)&service->handle,
				sizeof(service->handle)) != 0) {
				ret = -EFAULT;
				vchiq_remove_service(service->handle);
			}

			/* Keep the service lock - ownership passed to the
			 * user_service record, so skip the common unlock. */
			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_close_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		}
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			/* close_pending is false on first entry, and when the
			   wait in vchiq_close_service has been interrupted. */
			if (!user_service->close_pending) {
				status = vchiq_remove_service(service->handle);
				if (status != VCHIQ_SUCCESS)
					break;
			}

			/* close_pending is true once the underlying service
			   has been closed until the client library calls the
			   CLOSE_DELIVERED ioctl, signalling close_event. */
			if (user_service->close_pending &&
				down_interruptible(&user_service->close_event))
				status = VCHIQ_RETRY;
		}
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		VCHIQ_QUEUE_MESSAGE_T args;
		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
			if (copy_from_user(elements, args.elements,
				args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
				status = vchiq_queue_message
					(args.handle,
					elements, args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		VCHIQ_QUEUE_BULK_TRANSFER_T args;
		struct bulk_waiter_node *waiter = NULL;
		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
				GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}
			args.userdata = &waiter->bulk_waiter;
		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
			/* Resume a blocking transfer that was previously
			 * interrupted: find the waiter parked for this pid. */
			struct list_head *pos;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each(pos, &instance->bulk_waiter_list) {
				if (list_entry(pos, struct bulk_waiter_node,
					list)->pid == current->pid) {
					waiter = list_entry(pos,
						struct bulk_waiter_node,
						list);
					list_del(pos);
					break;
				}

			}
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d",
					current->pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				"found bulk_waiter %pK for pid %d", waiter,
				current->pid);
			args.userdata = &waiter->bulk_waiter;
		}
		status = vchiq_bulk_transfer
			(args.handle,
			 VCHI_MEM_HANDLE_INVALID,
			 args.data, args.size,
			 args.userdata, args.mode,
			 dir);
		if (!waiter)
			break;
		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
			!waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			kfree(waiter);
		} else {
			/* Interrupted while the bulk is still in flight:
			 * park the waiter for a later MODE_WAITING retry and
			 * tell userspace by rewriting args.mode. */
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->pid;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);

			if (copy_to_user((void __user *)
				&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
					arg)->mode),
				(const void *)&mode_waiting,
				sizeof(mode_waiting)) != 0)
				ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		VCHIQ_AWAIT_COMPLETION_T args;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		if (copy_from_user(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		mutex_lock(&instance->completion_mutex);

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		/* Wait for at least one completion; the mutex is dropped
		 * around the sleep so the callback side can make progress. */
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			int rc;
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			mutex_unlock(&instance->completion_mutex);
			rc = down_interruptible(&instance->insert_event);
			mutex_lock(&instance->completion_mutex);
			if (rc != 0) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					"AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		/* A read memory barrier is needed to stop prefetch of a stale
		** completion record
		*/
		rmb();

		if (ret == 0) {
			int msgbufcount = args.msgbufcount;
			/* From here on, ret counts delivered completions;
			 * errors are only reported if nothing was delivered. */
			for (ret = 0; ret < args.count; ret++) {
				VCHIQ_COMPLETION_DATA_T *completion;
				VCHIQ_SERVICE_T *service;
				USER_SERVICE_T *user_service;
				VCHIQ_HEADER_T *header;
				if (instance->completion_remove ==
					instance->completion_insert)
					break;
				completion = &instance->completions[
					instance->completion_remove &
					(MAX_COMPLETIONS - 1)];

				/* Swap the stashed service pointer for the
				 * client's own userdata before copy-out. */
				service = completion->service_userdata;
				user_service = service->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						sizeof(VCHIQ_HEADER_T);
					/* This must be a VCHIQ-style service */
					if (args.msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %pK: msgbufsize %x < msglen %x",
							header, args.msgbufsize,
							msglen);
						WARN(1, "invalid message "
							"size\n");
						if (ret == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
						(const void __user *)
						&args.msgbufs[msgbufcount],
						sizeof(msgbuf)) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
						msglen) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service->handle,
						header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				if ((completion->reason ==
					VCHIQ_SERVICE_CLOSED) &&
					!instance->use_close_delivered)
					unlock_service(service);

				if (copy_to_user((void __user *)(
					(size_t)args.buf +
					ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
					completion,
					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
					if (ret == 0)
						ret = -EFAULT;
					break;
				}

				instance->completion_remove++;
			}

			/* Report how many message buffers remain unused. */
			if (msgbufcount != args.msgbufcount) {
				if (copy_to_user((void __user *)
					&((VCHIQ_AWAIT_COMPLETION_T *)arg)->
						msgbufcount,
					&msgbufcount,
					sizeof(msgbufcount)) != 0) {
					ret = -EFAULT;
				}
			}
		}

		if (ret != 0)
			up(&instance->remove_event);
		mutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		VCHIQ_DEQUEUE_MESSAGE_T args;
		USER_SERVICE_T *user_service;
		VCHIQ_HEADER_T *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		/* Only vchi-style services queue messages in msg_queue. */
		user_service = (USER_SERVICE_T *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!args.blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			/* Set under the spinlock so service_callback sees it
			 * atomically with the queue state. */
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (down_interruptible(
					&user_service->insert_event) != 0) {
					vchiq_log_info(vchiq_arm_log_level,
						"DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				user_service->msg_insert);

			/* Interrupted - the spinlock is not held here. */
			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
			(MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		/* Let a blocked service_callback refill the slot. */
		up(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= args.bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((args.buf == NULL) ||
				(copy_to_user((void __user *)args.buf,
				header->data,
				header->size) == 0)) {
				ret = header->size;
				vchiq_release_message(
					service->handle,
					header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
				"header %pK: bufsize %x < size %x",
				header, args.bufsize, header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		VCHIQ_GET_CONFIG_T args;
		VCHIQ_CONFIG_T config;

		if (copy_from_user(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Old clients may ask for a shorter (prefix) config. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}
		status = vchiq_get_config(instance, args.config_size, &config);
		if (status == VCHIQ_SUCCESS) {
			if (copy_to_user((void __user *)args.pconfig,
				&config, args.config_size) != 0) {
				ret = -EFAULT;
				break;
			}
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		VCHIQ_SET_SERVICE_OPTION_T args;

		if (copy_from_user(
			&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_DUMP_PHYS_MEM: {
		VCHIQ_DUMP_MEM_T args;

		if (copy_from_user
			(&args, (const void __user *)arg,
			sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		dump_phys_mem(args.virt_addr, args.num_bytes);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service != NULL) {
			USER_SERVICE_T *user_service =
				(USER_SERVICE_T *)service->base.userdata;
			close_delivered(user_service);
		}
		else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	/* Translate a core status into an errno unless a case already
	 * set an explicit result. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			" ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			" ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}
1114
1115/****************************************************************************
1116*
1117* vchiq_open
1118*
1119***************************************************************************/
1120
1121static int
1122vchiq_open(struct inode *inode, struct file *file)
1123{
1124 int dev = iminor(inode) & 0x0f;
1125 vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1126 switch (dev) {
1127 case VCHIQ_MINOR: {
1128 int ret;
1129 VCHIQ_STATE_T *state = vchiq_get_state();
1130 VCHIQ_INSTANCE_T instance;
1131
1132 if (!state) {
1133 vchiq_log_error(vchiq_arm_log_level,
1134 "vchiq has no connection to VideoCore");
1135 return -ENOTCONN;
1136 }
1137
1138 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1139 if (!instance)
1140 return -ENOMEM;
1141
1142 instance->state = state;
1143 instance->pid = current->tgid;
1144
1145 ret = vchiq_debugfs_add_instance(instance);
1146 if (ret != 0) {
1147 kfree(instance);
1148 return ret;
1149 }
1150
1151 sema_init(&instance->insert_event, 0);
1152 sema_init(&instance->remove_event, 0);
1153 mutex_init(&instance->completion_mutex);
1154 mutex_init(&instance->bulk_waiter_list_mutex);
1155 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1156
1157 file->private_data = instance;
1158 } break;
1159
1160 default:
1161 vchiq_log_error(vchiq_arm_log_level,
1162 "Unknown minor device: %d", dev);
1163 return -ENXIO;
1164 }
1165
1166 return 0;
1167}
1168
1169/****************************************************************************
1170*
1171* vchiq_release
1172*
1173***************************************************************************/
1174
/* Release handler for /dev/vchiq: tear down the per-process instance.
 * Stops the completion thread, terminates every service the instance
 * owns, drains queued messages and completions, and frees the instance.
 * NOTE(review): callers of the completions/msg queues appear to rely on
 * the spinlock drop/retake pattern below being safe against concurrent
 * inserts once instance->closing is set - confirm against the slot
 * handler. */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;
	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		/* No state means no connection to VideoCore; nothing can be
		 * torn down cleanly. */
		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
				USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) !=	NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Return any still-queued messages to the remote
			 * side.  The spinlock is dropped around each
			 * vchiq_release_message() call since that may
			 * sleep/take other locks. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
						(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
			{
				USER_SERVICE_T *user_service =
					service->base.userdata;

				/* Wake any blocked user-thread */
				if (instance->use_close_delivered)
					up(&user_service->close_event);
				unlock_service(service);
			}
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free any bulk-waiter state left behind by interrupted
		 * QUEUE_BULK ioctls from this process. */
		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
				kfree(waiter);
			}
		}

		vchiq_debugfs_remove_instance(instance);

		kfree(instance);
		file->private_data = NULL;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		ret = -ENXIO;
	}

out:
	return ret;
}
1312
1313/****************************************************************************
1314*
1315* vchiq_dump
1316*
1317***************************************************************************/
1318
1319void
1320vchiq_dump(void *dump_context, const char *str, int len)
1321{
1322 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1323
1324 if (context->actual < context->space) {
1325 int copy_bytes;
1326 if (context->offset > 0) {
1327 int skip_bytes = min(len, (int)context->offset);
1328 str += skip_bytes;
1329 len -= skip_bytes;
1330 context->offset -= skip_bytes;
1331 if (context->offset > 0)
1332 return;
1333 }
1334 copy_bytes = min(len, (int)(context->space - context->actual));
1335 if (copy_bytes == 0)
1336 return;
1337 if (copy_to_user(context->buf + context->actual, str,
1338 copy_bytes))
1339 context->actual = -EFAULT;
1340 context->actual += copy_bytes;
1341 len -= copy_bytes;
1342
1343 /* If tne terminating NUL is included in the length, then it
1344 ** marks the end of a line and should be replaced with a
1345 ** carriage return. */
1346 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1347 char cr = '\n';
1348 if (copy_to_user(context->buf + context->actual - 1,
1349 &cr, 1))
1350 context->actual = -EFAULT;
1351 }
1352 }
1353}
1354
1355/****************************************************************************
1356*
1357* vchiq_dump_platform_instance_state
1358*
1359***************************************************************************/
1360
/* Dump a one-line summary for every instance with services in the
 * state.  There is no list of instances, so instead scan all services,
 * marking those that have been dumped (two passes: clear marks, then
 * dump each unmarked instance once). */
void
vchiq_dump_platform_instances(void *dump_context)
{
	VCHIQ_STATE_T *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/* Pass 1: clear the per-instance mark on every instance that owns
	 * a userspace (service_callback) service. */
	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance)
				instance->mark = 0;
		}
	}

	/* Pass 2: dump each instance exactly once, setting the mark. */
	for (i = 0; i < state->unused_service; i++) {
		VCHIQ_SERVICE_T *service = state->services[i];
		VCHIQ_INSTANCE_T instance;

		if (service && (service->base.callback == service_callback)) {
			instance = service->instance;
			if (instance && !instance->mark) {
				len = snprintf(buf, sizeof(buf),
					"Instance %pK: pid %d,%s completions %d/%d",
					instance, instance->pid,
					instance->connected ? " connected, " :
						"",
					instance->completion_insert -
						instance->completion_remove,
					MAX_COMPLETIONS);

				vchiq_dump(dump_context, buf, len + 1);

				instance->mark = 1;
			}
		}
	}
}
1406
1407/****************************************************************************
1408*
1409* vchiq_dump_platform_service_state
1410*
1411***************************************************************************/
1412
/* Append per-service platform details (owning instance, and for vchi
 * services the message-queue occupancy) to the service dump. */
void
vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
{
	USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);

	if ((service->base.callback == service_callback) &&
		user_service->is_vchi) {
		len += snprintf(buf + len, sizeof(buf) - len,
			", %d/%d messages",
			user_service->msg_insert - user_service->msg_remove,
			MSG_QUEUE_SIZE);

		/* NOTE(review): dequeue_pending is read here without
		 * msg_queue_spinlock; harmless for a diagnostic dump but
		 * confirm no stronger guarantee is expected. */
		if (user_service->dequeue_pending)
			len += snprintf(buf + len, sizeof(buf) - len,
				" (dequeue pending)");
	}

	vchiq_dump(dump_context, buf, len + 1);
}
1436
1437/****************************************************************************
1438*
1439* dump_user_mem
1440*
1441***************************************************************************/
1442
1443static void
1444dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1445{
1446 int rc;
1447 uint8_t *end_virt_addr = virt_addr + num_bytes;
1448 int num_pages;
1449 int offset;
1450 int end_offset;
1451 int page_idx;
1452 int prev_idx;
1453 struct page *page;
1454 struct page **pages;
1455 uint8_t *kmapped_virt_ptr;
1456
1457 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1458
1459 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1460 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1461 ~0x0fuL);
1462
1463 offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1464 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1465
1466 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1467
1468 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1469 if (pages == NULL) {
1470 vchiq_log_error(vchiq_arm_log_level,
1471 "Unable to allocation memory for %d pages\n",
1472 num_pages);
1473 return;
1474 }
1475
1476 down_read(&current->mm->mmap_sem);
166beccd 1477 rc = get_user_pages(
71bad7f0 1478 (unsigned long)virt_addr, /* start */
1479 num_pages, /* len */
768ae309 1480 0, /* gup_flags */
71bad7f0 1481 pages, /* pages (array of page pointers) */
1482 NULL); /* vmas */
1483 up_read(&current->mm->mmap_sem);
1484
1485 prev_idx = -1;
1486 page = NULL;
1487
0feb1ed5
SW
1488 if (rc < 0) {
1489 vchiq_log_error(vchiq_arm_log_level,
1490 "Failed to get user pages: %d\n", rc);
1491 goto out;
1492 }
1493
71bad7f0 1494 while (offset < end_offset) {
1495
1496 int page_offset = offset % PAGE_SIZE;
1497 page_idx = offset / PAGE_SIZE;
1498
1499 if (page_idx != prev_idx) {
1500
1501 if (page != NULL)
1502 kunmap(page);
1503 page = pages[page_idx];
1504 kmapped_virt_ptr = kmap(page);
1505
1506 prev_idx = page_idx;
1507 }
1508
1509 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1510 vchiq_log_dump_mem("ph",
1511 (uint32_t)(unsigned long)&kmapped_virt_ptr[
1512 page_offset],
1513 &kmapped_virt_ptr[page_offset], 16);
1514
1515 offset += 16;
1516 }
0feb1ed5
SW
1517
1518out:
71bad7f0 1519 if (page != NULL)
1520 kunmap(page);
1521
1522 for (page_idx = 0; page_idx < num_pages; page_idx++)
232664b3 1523 put_page(pages[page_idx]);
71bad7f0 1524
1525 kfree(pages);
1526}
1527
1528/****************************************************************************
1529*
1530* vchiq_read
1531*
1532***************************************************************************/
1533
/* Read handler for /dev/vchiq: render the global state dump into the
 * user buffer via vchiq_dump(), honouring *ppos so the dump can be read
 * in multiple chunks. */
static ssize_t
vchiq_read(struct file *file, char __user *buf,
	size_t count, loff_t *ppos)
{
	DUMP_CONTEXT_T context;
	context.buf = buf;
	context.actual = 0;
	context.space = count;
	context.offset = *ppos;

	vchiq_dump_state(&context, &g_state);

	/* context.actual is the byte count written (or a negative errno
	 * from a failed copy_to_user inside vchiq_dump). */
	*ppos += context.actual;

	return context.actual;
}
1550
1551VCHIQ_STATE_T *
1552vchiq_get_state(void)
1553{
1554
1555 if (g_state.remote == NULL)
1556 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1557 else if (g_state.remote->initialised != 1)
1558 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1559 __func__, g_state.remote->initialised);
1560
1561 return ((g_state.remote != NULL) &&
1562 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1563}
1564
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
1573
1574/*
1575 * Autosuspend related functionality
1576 */
1577
1578int
1579vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1580{
1581 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1582 if (!arm_state)
1583 /* autosuspend not supported - always return wanted */
1584 return 1;
1585 else if (arm_state->blocked_count)
1586 return 1;
1587 else if (!arm_state->videocore_use_count)
1588 /* usage count zero - check for override unless we're forcing */
1589 if (arm_state->resume_blocked)
1590 return 0;
1591 else
1592 return vchiq_platform_videocore_wanted(state);
1593 else
1594 /* non-zero usage count - videocore still required */
1595 return 1;
1596}
1597
/* Service callback for the keep-alive ("KEEP") service.  No callbacks
 * are expected on this service, so any invocation is logged as an
 * error; the arguments are otherwise ignored. */
static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}
1608
/* Kernel thread that mirrors ARM-side use/release counts onto the
 * VideoCore "KEEP" service.  It opens its own vchiq connection, then
 * loops forever: each time ka_evt fires it drains the atomic
 * use/release counters and issues the corresponding number of
 * vchiq_use_service()/vchiq_release_service() calls. */
static int
vchiq_keepalive_thread_func(void *v)
{
	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	VCHIQ_STATUS_T status;
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T ka_handle;

	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* Signals are flushed and the wait retried rather than
		 * exiting, since this thread must run for the lifetime of
		 * the connection. */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)
				!= 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1688
1689
1690
/* Initialise the ARM-side suspend/resume state: locks, keep-alive
 * counters and completions, the suspend timer, and the various blocker
 * completions (all started in their 'done' state so nothing blocks
 * until suspend machinery is actually engaged).
 * Always returns VCHIQ_SUCCESS; a NULL arm_state is tolerated as a
 * no-op. */
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		init_completion(&arm_state->vc_suspend_complete);

		init_completion(&arm_state->vc_resume_complete);
		/* Initialise to 'done' state. We only want to block on resume
		 * completion while videocore is suspended. */
		set_resume_state(arm_state, VC_RESUME_RESUMED);

		init_completion(&arm_state->resume_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while resume is blocked */
		complete_all(&arm_state->resume_blocker);

		init_completion(&arm_state->blocked_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while things are waiting on the resume blocker */
		complete_all(&arm_state->blocked_blocker);

		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
		arm_state->suspend_timer_running = 0;
		setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
			    (unsigned long)(state));

		arm_state->first_connect = 0;

	}
	return VCHIQ_SUCCESS;
}
1729
1730/*
1731** Functions to modify the state variables;
1732** set_suspend_state
1733** set_resume_state
1734**
1735** There are more state variables than we might like, so ensure they remain in
1736** step. Suspend and resume state are maintained separately, since most of
1737** these state machines can operate independently. However, there are a few
1738** states where state transitions in one state machine cause a reset to the
1739** other state machine. In addition, there are some completion events which
1740** need to occur on state machine reset and end-state(s), so these are also
1741** dealt with in these functions.
1742**
1743** In all states we set the state variable according to the input, but in some
1744** cases we perform additional steps outlined below;
1745**
1746** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1747** The suspend completion is completed after any suspend
1748** attempt. When we reset the state machine we also reset
1749** the completion. This reset occurs when videocore is
1750** resumed, and also if we initiate suspend after a suspend
1751** failure.
1752**
1753** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1754** suspend - ie from this point on we must try to suspend
1755** before resuming can occur. We therefore also reset the
1756** resume state machine to VC_RESUME_IDLE in this state.
1757**
1758** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1759** complete_all on the suspend completion to notify
1760** anything waiting for suspend to happen.
1761**
1762** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1763** initiate resume, so no need to alter resume state.
1764** We call complete_all on the suspend completion to notify
1765** of suspend rejection.
1766**
1767** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1768** suspend completion and reset the resume state machine.
1769**
1770** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
1771** resume completion is in it's 'done' state whenever
fa9b8988
MY
1772** videcore is running. Therefore, the VC_RESUME_IDLE
1773** state implies that videocore is suspended.
71bad7f0 1774** Hence, any thread which needs to wait until videocore is
1775** running can wait on this completion - it will only block
1776** if videocore is suspended.
1777**
1778** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
1779** Call complete_all on the resume completion to unblock
1780** any threads waiting for resume. Also reset the suspend
**			state machine to its idle state.
1782**
1783** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1784*/
1785
/* Set the suspend state machine to new_state and run the per-state
 * side effects documented in the block comment above (completions,
 * resume-machine resets).  NOTE(review): callers appear to hold the
 * susp_res_lock write lock - confirm before adding new call sites. */
void
set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_suspend_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_suspend_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_SUSPEND_FORCE_CANCELED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REJECTED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_FAILED:
		/* Failed suspend implies videocore is still running, so
		 * force the resume machine back to RESUMED as well. */
		complete_all(&arm_state->vc_suspend_complete);
		arm_state->vc_resume_state = VC_RESUME_RESUMED;
		complete_all(&arm_state->vc_resume_complete);
		break;
	case VC_SUSPEND_IDLE:
		reinit_completion(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REQUESTED:
		break;
	case VC_SUSPEND_IN_PROGRESS:
		/* Point of no return - reset the resume state machine. */
		set_resume_state(arm_state, VC_RESUME_IDLE);
		break;
	case VC_SUSPEND_SUSPENDED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	default:
		BUG();
		break;
	}
}
1822
/* Set the resume state machine to new_state and run the per-state side
 * effects documented in the block comment above.  Reaching RESUMED also
 * resets the suspend machine to IDLE. */
void
set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_resume_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_resume_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_RESUME_FAILED:
		break;
	case VC_RESUME_IDLE:
		reinit_completion(&arm_state->vc_resume_complete);
		break;
	case VC_RESUME_REQUESTED:
		break;
	case VC_RESUME_IN_PROGRESS:
		break;
	case VC_RESUME_RESUMED:
		/* Unblock waiters and reset the suspend machine. */
		complete_all(&arm_state->vc_resume_complete);
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		break;
	default:
		BUG();
		break;
	}
}
1850
1851
1852/* should be called with the write lock held */
1853inline void
1854start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1855{
1856 del_timer(&arm_state->suspend_timer);
1857 arm_state->suspend_timer.expires = jiffies +
1858 msecs_to_jiffies(arm_state->
1859 suspend_timer_timeout);
1860 add_timer(&arm_state->suspend_timer);
1861 arm_state->suspend_timer_running = 1;
1862}
1863
1864/* should be called with the write lock held */
1865static inline void
1866stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1867{
1868 if (arm_state->suspend_timer_running) {
1869 del_timer(&arm_state->suspend_timer);
1870 arm_state->suspend_timer_running = 0;
1871 }
1872}
1873
/* True when a resume should be initiated: suspend has progressed past
 * IDLE, resume has not yet been requested, and videocore is wanted. */
static inline int
need_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
			(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
			vchiq_videocore_wanted(state);
}
1882
/* Block future resumes prior to a forced suspend.  Called with the
 * susp_res_lock write lock held; the lock is temporarily dropped (and
 * retaken) around each wait.  Returns VCHIQ_SUCCESS with resume_blocked
 * set, or VCHIQ_ERROR on timeout/interruption - in every case the lock
 * is held again on return. */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		reinit_completion(&arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed" , __func__);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		/* Give resume at most two timeout periods before failing. */
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume" , __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}
	reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
1948
/* Undo block_resume(): release any threads waiting on resume_blocker
 * and clear the blocked flag.  NOTE(review): callers appear to hold the
 * susp_res_lock write lock - confirm before adding new call sites. */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
1955
1956/* Initiate suspend via slot handler. Should be called with the write lock
1957 * held */
/* Initiate suspend via slot handler. Should be called with the write lock
 * held.  Depending on the current suspend state this is a no-op (already
 * requested / in progress) or moves the machine to REQUESTED and pokes
 * the slot handler to carry it out.  The switch deliberately places
 * default: before the REJECTED/FAILED/IDLE cases so unexpected states
 * are logged and then fall through to re-initiate suspend. */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2007
/* Called by the slot handler: if a suspend has been requested and
 * videocore is currently resumed, move to IN_PROGRESS under the lock
 * and then perform the platform suspend outside the lock. */
void
vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int susp = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
			arm_state->vc_resume_state == VC_RESUME_RESUMED) {
		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
		susp = 1;
	}
	write_unlock_bh(&arm_state->susp_res_lock);

	/* The platform call may sleep, so it runs lock-free. */
	if (susp)
		vchiq_platform_suspend(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}
2034
2035
/* Log why a forced suspend timed out: either the videocore use count is
 * zero (so something else is holding it up), or the first in-use service
 * is named as the culprit. */
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	char err[50] = "";
	int vc_use_count = arm_state->videocore_use_count;
	int active_services = state->unused_service;
	int i;

	if (!arm_state->videocore_use_count) {
		snprintf(err, sizeof(err), " Videocore usecount is 0");
		goto output_msg;
	}
	/* Report the first service still holding a use count. */
	for (i = 0; i < active_services; i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];
		if (service_ptr && service_ptr->service_use_count &&
			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
			snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
					service_ptr->base.fourcc),
				service_ptr->client_id,
				service_ptr->service_use_count,
				service_ptr->service_use_count ==
					vc_use_count ? "" : " (+ more)");
			break;
		}
	}

output_msg:
	vchiq_log_error(vchiq_susp_log_level,
		"timed out waiting for vc suspend (%d).%s",
		arm_state->autosuspend_override, err);

}
2070
2071/* Try to get videocore into suspended state, regardless of autosuspend state.
2072** We don't actually force suspend, since videocore may get into a bad state
2073** if we force suspend at a bad time. Instead, we wait for autosuspend to
2074** determine a good point to suspend. If this doesn't happen within 100ms we
2075** report failure.
2076**
2077** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2078** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2079*/
/* See the block comment above: try to get videocore suspended without
 * actually forcing it, by blocking resume and waiting (with one retry)
 * for autosuspend to complete.  Returns VCHIQ_SUCCESS when suspended,
 * VCHIQ_RETRY on timeout, VCHIQ_ERROR when interrupted.  All error
 * paths unblock resume before returning. */
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Prevent any new resume from starting while we wait. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	/* Wait (lock dropped) for the suspend completion; a timeout is
	 * retried once if suspend is still in progress. */
	do {
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2190
2191void
2192vchiq_check_suspend(VCHIQ_STATE_T *state)
2193{
2194 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2195
2196 if (!arm_state)
2197 goto out;
2198
2199 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2200
2201 write_lock_bh(&arm_state->susp_res_lock);
2202 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2203 arm_state->first_connect &&
2204 !vchiq_videocore_wanted(state)) {
2205 vchiq_arm_vcsuspend(state);
2206 }
2207 write_unlock_bh(&arm_state->susp_res_lock);
2208
2209out:
2210 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2211 return;
2212}
2213
2214
/*
 * Undo a resume block (taken by a forced suspend) and, if the videocore is
 * wanted again, request a resume and wait for it to complete.
 *
 * Returns 0 if the videocore ended up resumed, -1 if it remains suspended,
 * if the wait was interrupted, or if there is no ARM state.
 */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	/* Release the resume blocker and see (under the write lock) whether
	 * anything now needs the videocore back. */
	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		/* A resume was requested - wait (interruptibly, lock
		 * dropped) for it to finish. */
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	/* Re-read the final suspend state under the read lock to decide the
	 * return value. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2257
2258/* This function should be called with the write lock held */
2259int
2260vchiq_check_resume(VCHIQ_STATE_T *state)
2261{
2262 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2263 int resume = 0;
2264
2265 if (!arm_state)
2266 goto out;
2267
2268 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2269
2270 if (need_resume(state)) {
2271 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2272 request_poll(state, NULL, 0);
2273 resume = 1;
2274 }
2275
2276out:
2277 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2278 return resume;
2279}
2280
2281void
2282vchiq_platform_check_resume(VCHIQ_STATE_T *state)
2283{
2284 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2285 int res = 0;
2286
2287 if (!arm_state)
2288 goto out;
2289
2290 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2291
2292 write_lock_bh(&arm_state->susp_res_lock);
2293 if (arm_state->wake_address == 0) {
2294 vchiq_log_info(vchiq_susp_log_level,
2295 "%s: already awake", __func__);
2296 goto unlock;
2297 }
2298 if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
2299 vchiq_log_info(vchiq_susp_log_level,
2300 "%s: already resuming", __func__);
2301 goto unlock;
2302 }
2303
2304 if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
2305 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
2306 res = 1;
2307 } else
2308 vchiq_log_trace(vchiq_susp_log_level,
2309 "%s: not resuming (resume state %s)", __func__,
2310 resume_state_names[arm_state->vc_resume_state +
2311 VC_RESUME_NUM_OFFSET]);
2312
2313unlock:
2314 write_unlock_bh(&arm_state->susp_res_lock);
2315
2316 if (res)
2317 vchiq_platform_resume(state);
2318
2319out:
2320 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2321 return;
2322
2323}
2324
2325
2326
2327VCHIQ_STATUS_T
2328vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2329 enum USE_TYPE_E use_type)
2330{
2331 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2332 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2333 char entity[16];
2334 int *entity_uc;
2335 int local_uc, local_entity_uc;
2336
2337 if (!arm_state)
2338 goto out;
2339
2340 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2341
2342 if (use_type == USE_TYPE_VCHIQ) {
2343 sprintf(entity, "VCHIQ: ");
2344 entity_uc = &arm_state->peer_use_count;
2345 } else if (service) {
2346 sprintf(entity, "%c%c%c%c:%03d",
2347 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2348 service->client_id);
2349 entity_uc = &service->service_use_count;
2350 } else {
2351 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2352 "ptr", __func__);
2353 ret = VCHIQ_ERROR;
2354 goto out;
2355 }
2356
2357 write_lock_bh(&arm_state->susp_res_lock);
2358 while (arm_state->resume_blocked) {
2359 /* If we call 'use' while force suspend is waiting for suspend,
2360 * then we're about to block the thread which the force is
2361 * waiting to complete, so we're bound to just time out. In this
2362 * case, set the suspend state such that the wait will be
2363 * canceled, so we can complete as quickly as possible. */
2364 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2365 VC_SUSPEND_IDLE) {
2366 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2367 break;
2368 }
2369 /* If suspend is already in progress then we need to block */
2370 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2371 /* Indicate that there are threads waiting on the resume
2372 * blocker. These need to be allowed to complete before
2373 * a _second_ call to force suspend can complete,
2374 * otherwise low priority threads might never actually
2375 * continue */
2376 arm_state->blocked_count++;
2377 write_unlock_bh(&arm_state->susp_res_lock);
2378 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2379 "blocked - waiting...", __func__, entity);
2380 if (wait_for_completion_killable(
2381 &arm_state->resume_blocker) != 0) {
2382 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2383 "wait for resume blocker interrupted",
2384 __func__, entity);
2385 ret = VCHIQ_ERROR;
2386 write_lock_bh(&arm_state->susp_res_lock);
2387 arm_state->blocked_count--;
2388 write_unlock_bh(&arm_state->susp_res_lock);
2389 goto out;
2390 }
2391 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2392 "unblocked", __func__, entity);
2393 write_lock_bh(&arm_state->susp_res_lock);
2394 if (--arm_state->blocked_count == 0)
2395 complete_all(&arm_state->blocked_blocker);
2396 }
2397 }
2398
2399 stop_suspend_timer(arm_state);
2400
2401 local_uc = ++arm_state->videocore_use_count;
2402 local_entity_uc = ++(*entity_uc);
2403
2404 /* If there's a pending request which hasn't yet been serviced then
2405 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2406 * vc_resume_complete will block until we either resume or fail to
2407 * suspend */
2408 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2409 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2410
2411 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2412 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2413 vchiq_log_info(vchiq_susp_log_level,
2414 "%s %s count %d, state count %d",
2415 __func__, entity, local_entity_uc, local_uc);
2416 request_poll(state, NULL, 0);
2417 } else
2418 vchiq_log_trace(vchiq_susp_log_level,
2419 "%s %s count %d, state count %d",
2420 __func__, entity, *entity_uc, local_uc);
2421
2422
2423 write_unlock_bh(&arm_state->susp_res_lock);
2424
2425 /* Completion is in a done state when we're not suspended, so this won't
2426 * block for the non-suspended case. */
2427 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2428 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2429 __func__, entity);
2430 if (wait_for_completion_killable(
2431 &arm_state->vc_resume_complete) != 0) {
2432 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2433 "resume interrupted", __func__, entity);
2434 ret = VCHIQ_ERROR;
2435 goto out;
2436 }
2437 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2438 entity);
2439 }
2440
2441 if (ret == VCHIQ_SUCCESS) {
2442 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2443 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2444 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2445 /* Send the use notify to videocore */
2446 status = vchiq_send_remote_use_active(state);
2447 if (status == VCHIQ_SUCCESS)
2448 ack_cnt--;
2449 else
2450 atomic_add(ack_cnt,
2451 &arm_state->ka_use_ack_count);
2452 }
2453 }
2454
2455out:
2456 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2457 return ret;
2458}
2459
2460VCHIQ_STATUS_T
2461vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2462{
2463 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2464 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2465 char entity[16];
2466 int *entity_uc;
2467 int local_uc, local_entity_uc;
2468
2469 if (!arm_state)
2470 goto out;
2471
2472 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2473
2474 if (service) {
2475 sprintf(entity, "%c%c%c%c:%03d",
2476 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2477 service->client_id);
2478 entity_uc = &service->service_use_count;
2479 } else {
2480 sprintf(entity, "PEER: ");
2481 entity_uc = &arm_state->peer_use_count;
2482 }
2483
2484 write_lock_bh(&arm_state->susp_res_lock);
2485 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2486 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2487 WARN_ON(!arm_state->videocore_use_count);
2488 WARN_ON(!(*entity_uc));
2489 ret = VCHIQ_ERROR;
2490 goto unlock;
2491 }
2492 local_uc = --arm_state->videocore_use_count;
2493 local_entity_uc = --(*entity_uc);
2494
2495 if (!vchiq_videocore_wanted(state)) {
2496 if (vchiq_platform_use_suspend_timer() &&
2497 !arm_state->resume_blocked) {
2498 /* Only use the timer if we're not trying to force
2499 * suspend (=> resume_blocked) */
2500 start_suspend_timer(arm_state);
2501 } else {
2502 vchiq_log_info(vchiq_susp_log_level,
2503 "%s %s count %d, state count %d - suspending",
2504 __func__, entity, *entity_uc,
2505 arm_state->videocore_use_count);
2506 vchiq_arm_vcsuspend(state);
2507 }
2508 } else
2509 vchiq_log_trace(vchiq_susp_log_level,
2510 "%s %s count %d, state count %d",
2511 __func__, entity, *entity_uc,
2512 arm_state->videocore_use_count);
2513
2514unlock:
2515 write_unlock_bh(&arm_state->susp_res_lock);
2516
2517out:
2518 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2519 return ret;
2520}
2521
/*
 * Called when the remote side (videocore) signals a "use" request.
 * Counts the request and completes ka_evt - presumably serviced by the
 * keepalive thread (not visible here); confirm against
 * vchiq_keepalive_thread_func.
 */
void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
2530
/*
 * Called when the remote side (videocore) signals a "release" request.
 * Counts the request and completes ka_evt - presumably serviced by the
 * keepalive thread (not visible here); confirm against
 * vchiq_keepalive_thread_func.
 */
void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2539
/* Convenience wrapper: take a USE_TYPE_SERVICE use count for a service. */
VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2545
/* Convenience wrapper: drop a service's use count. */
VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
{
	return vchiq_release_internal(service->state, service);
}
2551
/* Accessor: the debugfs node embedded in a VCHIQ instance. */
VCHIQ_DEBUGFS_NODE_T *
vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
{
	return &instance->debugfs_node;
}
2557
2558int
2559vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2560{
2561 VCHIQ_SERVICE_T *service;
2562 int use_count = 0, i;
2563 i = 0;
2564 while ((service = next_service_by_instance(instance->state,
2565 instance, &i)) != NULL) {
2566 use_count += service->service_use_count;
2567 unlock_service(service);
2568 }
2569 return use_count;
2570}
2571
/* Accessor: pid of the process that opened this instance. */
int
vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
{
	return instance->pid;
}
2577
/* Accessor: the instance's trace flag. */
int
vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
{
	return instance->trace;
}
2583
2584void
2585vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2586{
2587 VCHIQ_SERVICE_T *service;
2588 int i;
2589 i = 0;
2590 while ((service = next_service_by_instance(instance->state,
2591 instance, &i)) != NULL) {
2592 service->trace = trace;
2593 unlock_service(service);
2594 }
2595 instance->trace = (trace != 0);
2596}
2597
2598static void suspend_timer_callback(unsigned long context)
2599{
2600 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2601 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2602 if (!arm_state)
2603 goto out;
2604 vchiq_log_info(vchiq_susp_log_level,
2605 "%s - suspend timer expired - check suspend", __func__);
2606 vchiq_check_suspend(state);
2607out:
2608 return;
2609}
2610
2611VCHIQ_STATUS_T
2612vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2613{
2614 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2615 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2616 if (service) {
2617 ret = vchiq_use_internal(service->state, service,
2618 USE_TYPE_SERVICE_NO_RESUME);
2619 unlock_service(service);
2620 }
2621 return ret;
2622}
2623
2624VCHIQ_STATUS_T
2625vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2626{
2627 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2628 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2629 if (service) {
2630 ret = vchiq_use_internal(service->state, service,
2631 USE_TYPE_SERVICE);
2632 unlock_service(service);
2633 }
2634 return ret;
2635}
2636
2637VCHIQ_STATUS_T
2638vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2639{
2640 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2641 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2642 if (service) {
2643 ret = vchiq_release_internal(service->state, service);
2644 unlock_service(service);
2645 }
2646 return ret;
2647}
2648
/*
 * Log a snapshot of the suspend/resume state and per-service use counts.
 * The snapshot is captured into locals under the read lock so the (slow)
 * logging can run with the lock dropped.
 *
 * NOTE(review): service_data[] puts ~64 * 3 ints (~768 bytes) on the
 * kernel stack - consider a heap allocation if this is ever called from a
 * deep call chain.
 */
void
vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int i, j = 0;
	/* Only dump 64 services */
	static const int local_max_services = 64;
	/* If there's more than 64 services, only dump ones with
	 * non-zero counts */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	/* Snapshot of the shared state, filled in under the read lock */
	enum vc_suspend_status vc_suspend_state;
	enum vc_resume_status vc_resume_state;
	int peer_count;
	int vc_use_count;
	int active_services;
	struct service_data_struct {
		int fourcc;
		int clientid;
		int use_count;
	} service_data[local_max_services];

	if (!arm_state)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	vc_suspend_state = arm_state->vc_suspend_state;
	vc_resume_state = arm_state->vc_resume_state;
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > local_max_services)
		only_nonzero = 1;

	/* Copy up to local_max_services entries; skip free slots, and when
	 * over the limit skip zero-use-count services too. */
	for (i = 0; (i < active_services) && (j < local_max_services); i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];
		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
			service_data[j].fourcc = service_ptr->base.fourcc;
			service_data[j].clientid = service_ptr->client_id;
			service_data[j++].use_count = service_ptr->
							service_use_count;
		}
	}

	read_unlock_bh(&arm_state->susp_res_lock);

	/* All logging below uses the snapshot, not the live state */
	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore suspend state: %s --",
		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore resume state: %s --",
		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d). Only dumping up to first %d services "
			"with non-zero use-count", active_services,
			local_max_services);

	for (i = 0; i < j; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	vchiq_dump_platform_use_state(state);
}
2730
2731VCHIQ_STATUS_T
2732vchiq_check_service(VCHIQ_SERVICE_T *service)
2733{
2734 VCHIQ_ARM_STATE_T *arm_state;
2735 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2736
2737 if (!service || !service->state)
2738 goto out;
2739
2740 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2741
2742 arm_state = vchiq_platform_get_arm_state(service->state);
2743
2744 read_lock_bh(&arm_state->susp_res_lock);
2745 if (service->service_use_count)
2746 ret = VCHIQ_SUCCESS;
2747 read_unlock_bh(&arm_state->susp_res_lock);
2748
2749 if (ret == VCHIQ_ERROR) {
2750 vchiq_log_error(vchiq_susp_log_level,
2751 "%s ERROR - %c%c%c%c:%d service count %d, "
2752 "state count %d, videocore suspend state %s", __func__,
2753 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2754 service->client_id, service->service_use_count,
2755 arm_state->videocore_use_count,
2756 suspend_state_names[arm_state->vc_suspend_state +
2757 VC_SUSPEND_NUM_OFFSET]);
2758 vchiq_dump_service_use_state(service->state);
2759 }
2760out:
2761 return ret;
2762}
2763
/* stub functions */
/* No-op on this platform; the parameter is deliberately unused. */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	(void)state;
}
2769
/*
 * Connection-state change hook.  On the first transition to CONNECTED,
 * spawn the per-state keepalive thread ("VCHIQka-<id>") exactly once; the
 * first_connect flag is tested and set under the write lock, which is
 * released before the (sleeping) kthread_create call.
 *
 * NOTE(review): on kthread_create failure, arm_state->ka_thread is left
 * holding an ERR_PTR - verify no other code dereferences it unchecked.
 */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[10];
			arm_state->first_connect = 1;
			/* Drop the lock before the sleeping allocations below */
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
				state->id);
			arm_state->ka_thread = kthread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (IS_ERR(arm_state->ka_thread)) {
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}
2799
2800static int vchiq_probe(struct platform_device *pdev)
2801{
2802 struct device_node *fw_node;
2803 struct rpi_firmware *fw;
2804 int err;
2805 void *ptr_err;
2806
2807 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
71bad7f0 2808 if (!fw_node) {
2809 dev_err(&pdev->dev, "Missing firmware node\n");
2810 return -ENOENT;
2811 }
bea845af 2812
71bad7f0 2813 fw = rpi_firmware_get(fw_node);
d10543ec 2814 of_node_put(fw_node);
71bad7f0 2815 if (!fw)
2816 return -EPROBE_DEFER;
2817
2818 platform_set_drvdata(pdev, fw);
2819
9224c15c 2820 err = vchiq_platform_init(pdev, &g_state);
71bad7f0 2821 if (err != 0)
9224c15c 2822 goto failed_platform_init;
71bad7f0 2823
2824 err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
2825 if (err != 0) {
2826 vchiq_log_error(vchiq_arm_log_level,
2827 "Unable to allocate device number");
9224c15c 2828 goto failed_platform_init;
71bad7f0 2829 }
2830 cdev_init(&vchiq_cdev, &vchiq_fops);
2831 vchiq_cdev.owner = THIS_MODULE;
2832 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2833 if (err != 0) {
2834 vchiq_log_error(vchiq_arm_log_level,
2835 "Unable to register device");
2836 goto failed_cdev_add;
2837 }
2838
2839 /* create sysfs entries */
2840 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2841 ptr_err = vchiq_class;
2842 if (IS_ERR(ptr_err))
2843 goto failed_class_create;
2844
2845 vchiq_dev = device_create(vchiq_class, NULL,
2846 vchiq_devid, NULL, "vchiq");
2847 ptr_err = vchiq_dev;
2848 if (IS_ERR(ptr_err))
2849 goto failed_device_create;
2850
9224c15c
SW
2851 /* create debugfs entries */
2852 err = vchiq_debugfs_init();
71bad7f0 2853 if (err != 0)
9224c15c 2854 goto failed_debugfs_init;
71bad7f0 2855
2856 vchiq_log_info(vchiq_arm_log_level,
2857 "vchiq: initialised - version %d (min %d), device %d.%d",
2858 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2859 MAJOR(vchiq_devid), MINOR(vchiq_devid));
2860
2861 return 0;
2862
9224c15c 2863failed_debugfs_init:
71bad7f0 2864 device_destroy(vchiq_class, vchiq_devid);
2865failed_device_create:
2866 class_destroy(vchiq_class);
2867failed_class_create:
2868 cdev_del(&vchiq_cdev);
2869 err = PTR_ERR(ptr_err);
2870failed_cdev_add:
2871 unregister_chrdev_region(vchiq_devid, 1);
9224c15c 2872failed_platform_init:
71bad7f0 2873 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2874 return err;
2875}
2876
/*
 * Platform-driver remove: tear down in reverse order of vchiq_probe().
 * NOTE(review): there is no corresponding vchiq_platform deinit call here
 * - confirm whether platform resources are released elsewhere.
 */
static int vchiq_remove(struct platform_device *pdev)
{
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	class_destroy(vchiq_class);
	cdev_del(&vchiq_cdev);
	unregister_chrdev_region(vchiq_devid, 1);

	return 0;
}
2887
/* Device-tree match table: binds this driver to "brcm,bcm2835-vchiq". */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

/* Platform-driver registration; module init/exit supplied by the macro. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
module_platform_driver(vchiq_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");