/**
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"

#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

/* Some per-instance constants */
#define MAX_COMPLETIONS 128
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 128

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to time out before actually
** _forcing_ suspend. This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200

static void suspend_timer_callback(struct timer_list *t);

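/*
 * Per-service state for a userspace client. It wraps the core vchiq_service
 * and carries a fixed-size circular queue of received message headers, plus
 * the completions used to block readers and writers on that queue.
 */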
struct user_service {
	struct vchiq_service *service;
	void *userdata;
	VCHIQ_INSTANCE_T instance;
	char is_vchi;
	char dequeue_pending;
	char close_pending;
	int message_available_pos;
	int msg_insert;
	int msg_remove;
	struct completion insert_event;
	struct completion remove_event;
	struct completion close_event;
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};

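/*
 * Tracks a blocking bulk transfer on behalf of a user thread, keyed by pid,
 * so that a transfer interrupted by a signal can be found and resumed by a
 * subsequent call from the same thread instead of being issued twice.
 */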
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

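/*
 * Per-instance state, created for each open of the vchiq device (or for each
 * in-kernel vchiq_initialise() caller). Completion records live in a circular
 * buffer indexed by the free-running completion_insert/completion_remove
 * counters.
 */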
struct vchiq_instance_struct {
	struct vchiq_state *state;
	struct vchiq_completion_data completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct completion insert_event;
	struct completion remove_event;
	struct mutex completion_mutex;

	int connected;
	int closing;
	int pid;
	int mark;
	int use_close_delivered;
	int trace;

	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};

struct dump_context {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
};

static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static struct vchiq_state g_state;
static struct class *vchiq_class;
static DEFINE_SPINLOCK(msg_queue_spinlock);
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};

static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir);

#define VCHIQ_INIT_RETRIES 10
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	struct vchiq_state *state;
	VCHIQ_INSTANCE_T instance = NULL;
	int i;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/* VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: videocore not initialized\n", __func__);
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n",
				  __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n",
				__func__);
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	status = VCHIQ_SUCCESS;

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_initialise);

VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex) != 0)
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				       "bulk_waiter - cleaned up %pK for pid %d",
				       waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
	return instance->connected;
}

VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex) != 0) {
		vchiq_log_trace(vchiq_core_log_level,
				"%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

VCHIQ_STATUS_T vchiq_add_service(
	VCHIQ_INSTANCE_T instance,
	const struct vchiq_service_params *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(
		state,
		params,
		srvstate,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = VCHIQ_SUCCESS;
	} else
		status = VCHIQ_ERROR;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_add_service);

VCHIQ_STATUS_T vchiq_open_service(
	VCHIQ_INSTANCE_T instance,
	const struct vchiq_service_params *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
					     params,
					     VCHIQ_SRVSTATE_OPENING,
					     instance,
					     NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
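
/*
 * Illustrative sketch (not part of the driver): how an in-kernel client
 * would typically use the exported API above. Error handling is elided,
 * my_callback/buf/len are placeholders, and the fourcc is an arbitrary
 * example value.
 *
 *	VCHIQ_INSTANCE_T instance;
 *	VCHIQ_SERVICE_HANDLE_T handle;
 *	struct vchiq_service_params params = {
 *		.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
 *		.callback    = my_callback,	// hypothetical VCHIQ_CALLBACK_T
 *		.userdata    = NULL,
 *		.version     = 1,
 *		.version_min = 1,
 *	};
 *
 *	vchiq_initialise(&instance);
 *	vchiq_connect(instance);
 *	vchiq_open_service(instance, &params, &handle);
 *	vchiq_bulk_transmit(handle, buf, len, NULL, VCHIQ_BULK_MODE_BLOCKING);
 *	vchiq_shutdown(instance);
 */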

VCHIQ_STATUS_T
vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle, (void *)data, size,
					     userdata, mode,
					     VCHIQ_BULK_TRANSMIT);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			(void *)data, size, VCHIQ_BULK_TRANSMIT);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

VCHIQ_STATUS_T
vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle, data, size, userdata,
					     mode, VCHIQ_BULK_RECEIVE);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			(void *)data, size, VCHIQ_BULK_RECEIVE);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

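/*
 * Perform a bulk transfer in BLOCKING mode on behalf of the current thread.
 * If the wait is interrupted by a signal, the waiter is parked on the
 * instance's bulk_waiter_list so that a retry from the same pid can pick the
 * outstanding transfer back up rather than queueing it again.
 */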
static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir)
{
	VCHIQ_INSTANCE_T instance;
	struct vchiq_service *service;
	VCHIQ_STATUS_T status;
	struct bulk_waiter_node *waiter = NULL;
	struct bulk_waiter_node *iter;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	/* Search via a separate iterator so that waiter stays NULL when the
	 * loop completes without a match (list_for_each_entry would
	 * otherwise leave it pointing at the list head).
	 */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			if ((bulk->data != data) ||
			    (bulk->size != size)) {
				/* This is not a retry of the previous one.
				 * Cancel the signal when the transfer
				 * completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	}

	if (!waiter) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
					"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, size, &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
	    !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			 * completes.
			 */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			       "saved bulk_waiter %pK for pid %d",
			       waiter, current->pid);
	}

	return status;
}
/****************************************************************************
 *
 *   add_completion
 *
 ***************************************************************************/

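/*
 * Append a completion record to the instance's circular completion queue,
 * waiting (killably) while the queue is full. For CLOSED notifications an
 * extra service reference is taken and held until the record is delivered.
 */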
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
				"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_killable(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				       "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				       "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

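	/* MAX_COMPLETIONS is a power of two, so masking the free-running
	 * insert counter both selects the slot and wraps it naturally.
	 */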
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
	   record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

/****************************************************************************
 *
 *   service_callback
 *
 ***************************************************************************/

static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
		 VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service,
			service->localport, user_service->userdata,
			reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
					"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
							NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_killable(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					       "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					       "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
			      bulk_userdata);
}

/****************************************************************************
 *
 *   user_service_free
 *
 ***************************************************************************/
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

/****************************************************************************
 *
 *   close_delivered
 *
 ***************************************************************************/
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		       "%s(handle=%x)",
		       __func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;
	size_t element_offset;
	unsigned long elements_to_go;
};

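/*
 * Copy callback used by vchiq_ioc_queue_message(): drains a caller-supplied
 * array of vchiq_elements, copying up to maxsize bytes from user space into
 * the destination slot each time it is invoked.
 */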
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}

/**************************************************************************
 *
 *   vchiq_ioc_queue_message
 *
 **************************************************************************/
static VCHIQ_STATUS_T
vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
			struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				   &context, total_size);
}

/****************************************************************************
 *
 *   vchiq_ioctl
 *
 ***************************************************************************/
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	VCHIQ_INSTANCE_T instance = file->private_data;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	DEBUG_INITIALISE(g_state.local)

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - instance %pK, cmd %s, arg %lx",
			__func__, instance,
			((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
			 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance,
							   &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc != 0) {
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: connect: could not lock mutex for state %d: %d",
					instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service args;
		struct user_service *user_service = NULL;
		void *userdata;
		int srvstate;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (args.is_open) {
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate = instance->connected ?
				VCHIQ_SRVSTATE_LISTENING :
				VCHIQ_SRVSTATE_HIDDEN;
		}

		userdata = args.params.userdata;
		args.params.callback = service_callback;
		args.params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&args.params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = (args.is_vchi != 0);
			user_service->dequeue_pending = 0;
			user_service->close_pending = 0;
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			init_completion(&user_service->insert_event);
			init_completion(&user_service->remove_event);
			init_completion(&user_service->close_event);

			if (args.is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

			if (copy_to_user((void __user *)
					 &(((struct vchiq_create_service __user *)
					    arg)->handle),
					 (const void *)&service->handle,
					 sizeof(service->handle)) != 0) {
				ret = -EFAULT;
				vchiq_remove_service(service->handle);
			}

			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/* close_pending is false on first entry, and when the
		   wait in vchiq_close_service has been interrupted. */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				vchiq_close_service(service->handle) :
				vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/* close_pending is true once the underlying service
		   has been closed until the client library calls the
		   CLOSE_DELIVERED ioctl, signalling close_event. */
		if (user_service->close_pending &&
		    wait_for_completion_killable(&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s: cmd %s returned error %d for service %c%c%c%c:%03d",
						__func__,
						(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
						status,
						VCHIQ_FOURCC_AS_4CHARS(
							service->base.fourcc),
						service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				status = vchiq_ioc_queue_message(args.handle,
								 elements,
								 args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct bulk_waiter_node *waiter = NULL;

		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
					 GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}

			args.userdata = &waiter->bulk_waiter;
		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
			struct bulk_waiter_node *iter;

			/* Find this thread's parked waiter via a separate
			 * iterator; waiter stays NULL if no entry matches.
			 */
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each_entry(iter, &instance->bulk_waiter_list,
					    list) {
				if (iter->pid == current->pid) {
					list_del(&iter->list);
					waiter = iter;
					break;
				}
			}
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
						"no bulk_waiter found for pid %d",
						current->pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				       "found bulk_waiter %pK for pid %d",
				       waiter, current->pid);
			args.userdata = &waiter->bulk_waiter;
		}

		status = vchiq_bulk_transfer(args.handle, args.data, args.size,
					     args.userdata, args.mode, dir);

		if (!waiter)
			break;

		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		    !waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			kfree(waiter);
		} else {
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->pid;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				       "saved bulk_waiter %pK for pid %d",
				       waiter, current->pid);

			if (copy_to_user((void __user *)
					 &(((struct vchiq_queue_bulk_transfer __user *)
					    arg)->mode),
					 (const void *)&mode_waiting,
					 sizeof(mode_waiting)) != 0)
				ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		mutex_lock(&instance->completion_mutex);

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			int rc;

			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			mutex_unlock(&instance->completion_mutex);
			rc = wait_for_completion_killable(
						&instance->insert_event);
			mutex_lock(&instance->completion_mutex);
			if (rc != 0) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					       "AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		if (ret == 0) {
			int msgbufcount = args.msgbufcount;
			int remove = instance->completion_remove;

			for (ret = 0; ret < args.count; ret++) {
				struct vchiq_completion_data *completion;
				struct vchiq_service *service;
				struct user_service *user_service;
				struct vchiq_header *header;

				if (remove == instance->completion_insert)
					break;

				completion = &instance->completions[
					remove & (MAX_COMPLETIONS - 1)];

				/*
				 * A read memory barrier is needed to stop
				 * prefetch of a stale completion record
				 */
				rmb();

				service = completion->service_userdata;
				user_service = service->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						sizeof(struct vchiq_header);
					/* This must be a VCHIQ-style service */
					if (args.msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %pK: msgbufsize %x < msglen %x",
							header, args.msgbufsize,
							msglen);
						WARN(1, "invalid message size\n");
						if (ret == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
							   (const void __user *)
							   &args.msgbufs[msgbufcount],
							   sizeof(msgbuf)) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
							 msglen) != 0) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service->handle,
							      header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				if ((completion->reason ==
					VCHIQ_SERVICE_CLOSED) &&
					!instance->use_close_delivered)
					unlock_service(service);

				if (copy_to_user((void __user *)(
						(size_t)args.buf + ret *
						sizeof(struct vchiq_completion_data)),
						completion,
						sizeof(struct vchiq_completion_data))
						!= 0) {
					if (ret == 0)
						ret = -EFAULT;
					break;
				}

				/*
				 * Ensure that the above copy has completed
				 * before advancing the remove pointer.
				 */
				mb();
				remove++;
				instance->completion_remove = remove;
			}

			if (msgbufcount != args.msgbufcount) {
				if (copy_to_user((void __user *)
						 &((struct vchiq_await_completion *)arg)
						 ->msgbufcount,
						 &msgbufcount,
						 sizeof(msgbufcount)) != 0) {
					ret = -EFAULT;
				}
			}
		}

		if (ret != 0)
			complete(&instance->remove_event);
		mutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;
		struct user_service *user_service;
		struct vchiq_header *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		user_service = (struct user_service *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!args.blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (wait_for_completion_killable(
						&user_service->insert_event)) {
					vchiq_log_info(vchiq_arm_log_level,
						       "DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				 user_service->msg_insert);

			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			     user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
						 (MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		complete(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= args.bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((args.buf == NULL) ||
			    (copy_to_user((void __user *)args.buf,
					  header->data,
					  header->size) == 0)) {
				ret = header->size;
				vchiq_release_message(service->handle,
						      header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
					"header %pK: bufsize %x < size %x",
					header, args.bufsize, header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args)) != 0) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(args.handle, args.option,
						  args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service != NULL) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
	    (ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			       "  ioctl instance %lx, cmd %s -> status %d, %ld",
			       (unsigned long)instance,
			       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			       ioctl_names[_IOC_NR(cmd)] :
			       "<invalid>",
			       status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
				"  ioctl instance %lx, cmd %s -> status %d, %ld",
				(unsigned long)instance,
				(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
				status, ret);

	return ret;
}

#if defined(CONFIG_COMPAT)

struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version;       /* Increment for non-trivial changes */
	short version_min;   /* Update for incompatible changes */
};

struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)

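/*
 * The 32-bit compat handlers below all follow the same pattern: copy the
 * 32-bit layout in, rebuild a native-layout struct in user-accessible
 * memory via compat_alloc_user_space(), hand that to vchiq_ioctl(), and
 * copy any OUT fields back to the caller's 32-bit struct afterwards.
 */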
static long
vchiq_compat_ioctl_create_service(
	struct file *file,
	unsigned int cmd,
	unsigned long arg)
{
	struct vchiq_create_service __user *args;
	struct vchiq_create_service32 __user *ptrargs32 =
		(struct vchiq_create_service32 __user *)arg;
	struct vchiq_create_service32 args32;
	long ret;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_create_service32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.params.fourcc, &args->params.fourcc) ||
	    put_user(compat_ptr(args32.params.callback),
		     &args->params.callback) ||
	    put_user(compat_ptr(args32.params.userdata),
		     &args->params.userdata) ||
	    put_user(args32.params.version, &args->params.version) ||
	    put_user(args32.params.version_min,
		     &args->params.version_min) ||
	    put_user(args32.is_open, &args->is_open) ||
	    put_user(args32.is_vchi, &args->is_vchi) ||
	    put_user(args32.handle, &args->handle))
		return -EFAULT;

	ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);

	if (ret < 0)
		return ret;

	if (get_user(args32.handle, &args->handle))
		return -EFAULT;

	if (copy_to_user(&ptrargs32->handle,
			 &args32.handle,
			 sizeof(args32.handle)))
		return -EFAULT;

	return 0;
}

struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)

static long
vchiq_compat_ioctl_queue_message(struct file *file,
				 unsigned int cmd,
				 unsigned long arg)
{
	struct vchiq_queue_message *args;
	struct vchiq_element __user *elements;
	struct vchiq_queue_message32 args32;
	unsigned int count;

	if (copy_from_user(&args32,
			   (struct vchiq_queue_message32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	args = compat_alloc_user_space(sizeof(*args) +
				       (sizeof(*elements) * MAX_ELEMENTS));

	if (!args)
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(args32.count, &args->count) ||
	    put_user(compat_ptr(args32.elements), &args->elements))
		return -EFAULT;

	if (args32.count > MAX_ELEMENTS)
		return -EINVAL;

	if (args32.elements && args32.count) {
		struct vchiq_element32 tempelement32[MAX_ELEMENTS];

		elements = (struct vchiq_element __user *)(args + 1);

		if (copy_from_user(&tempelement32,
				   compat_ptr(args32.elements),
				   sizeof(tempelement32)))
			return -EFAULT;

		for (count = 0; count < args32.count; count++) {
			if (put_user(compat_ptr(tempelement32[count].data),
				     &elements[count].data) ||
			    put_user(tempelement32[count].size,
				     &elements[count].size))
				return -EFAULT;
		}

		if (put_user(elements, &args->elements))
			return -EFAULT;
	}

	return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
}

struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	VCHIQ_BULK_MODE_T mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)

static long
vchiq_compat_ioctl_queue_bulk(struct file *file,
			      unsigned int cmd,
			      unsigned long arg)
{
	struct vchiq_queue_bulk_transfer __user *args;
	struct vchiq_queue_bulk_transfer32 args32;
	struct vchiq_queue_bulk_transfer32 __user *ptrargs32 =
		(struct vchiq_queue_bulk_transfer32 __user *)arg;
	long ret;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_queue_bulk_transfer32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(compat_ptr(args32.data), &args->data) ||
	    put_user(args32.size, &args->size) ||
	    put_user(compat_ptr(args32.userdata), &args->userdata) ||
	    put_user(args32.mode, &args->mode))
		return -EFAULT;

	if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
		cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
	else
		cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;

	ret = vchiq_ioctl(file, cmd, (unsigned long)args);

	if (ret < 0)
		return ret;

	if (get_user(args32.mode, &args->mode))
		return -EFAULT;

	if (copy_to_user(&ptrargs32->mode,
			 &args32.mode,
			 sizeof(args32.mode)))
		return -EFAULT;

	return 0;
}

struct vchiq_completion_data32 {
	VCHIQ_REASON_T reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};

struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)

static long
vchiq_compat_ioctl_await_completion(struct file *file,
				    unsigned int cmd,
				    unsigned long arg)
{
	struct vchiq_await_completion __user *args;
	struct vchiq_completion_data __user *completion;
	struct vchiq_completion_data completiontemp;
	struct vchiq_await_completion32 args32;
	struct vchiq_completion_data32 completion32;
	unsigned int __user *msgbufcount32;
	unsigned int msgbufcount_native;
	compat_uptr_t msgbuf32;
	void __user *msgbuf;
	void * __user *msgbufptr;
	long ret;

	args = compat_alloc_user_space(sizeof(*args) +
				       sizeof(*completion) +
				       sizeof(*msgbufptr));
	if (!args)
		return -EFAULT;

	completion = (struct vchiq_completion_data __user *)(args + 1);
	msgbufptr = (void * __user *)(completion + 1);

	if (copy_from_user(&args32,
			   (struct vchiq_await_completion32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.count, &args->count) ||
	    put_user(compat_ptr(args32.buf), &args->buf) ||
	    put_user(args32.msgbufsize, &args->msgbufsize) ||
	    put_user(args32.msgbufcount, &args->msgbufcount) ||
	    put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
		return -EFAULT;

	/* These are simple cases, so just fall into the native handler */
	if (!args32.count || !args32.buf || !args32.msgbufcount)
		return vchiq_ioctl(file,
				   VCHIQ_IOC_AWAIT_COMPLETION,
				   (unsigned long)args);

	/*
	 * These are the more complex cases. Typical applications of this
	 * ioctl will use a very large count, with a very large msgbufcount.
	 * Since the native ioctl can asynchronously fill in the returned
	 * buffers and the application can in theory begin processing messages
	 * even before the ioctl returns, a bit of a trick is used here.
	 *
	 * By forcing both count and msgbufcount to be 1, it forces the native
	 * ioctl to only claim at most 1 message is available. This tricks
	 * the calling application into thinking only 1 message was actually
	 * available in the queue, so, like all good applications, it will
	 * retry waiting until all the required messages are received.
	 *
	 * This trick has been tested and proven to work with vchiq_test,
	 * Minecraft_PI, the "hello pi" examples, and various other
	 * applications that are included in Raspbian.
	 */

	if (copy_from_user(&msgbuf32,
			   compat_ptr(args32.msgbufs) +
			   (sizeof(compat_uptr_t) *
			    (args32.msgbufcount - 1)),
			   sizeof(msgbuf32)))
		return -EFAULT;

	msgbuf = compat_ptr(msgbuf32);

	if (copy_to_user(msgbufptr,
			 &msgbuf,
			 sizeof(msgbuf)))
		return -EFAULT;

	if (copy_to_user(&args->msgbufs,
			 &msgbufptr,
			 sizeof(msgbufptr)))
		return -EFAULT;

	if (put_user(1U, &args->count) ||
	    put_user(completion, &args->buf) ||
	    put_user(1U, &args->msgbufcount))
		return -EFAULT;

	ret = vchiq_ioctl(file,
			  VCHIQ_IOC_AWAIT_COMPLETION,
			  (unsigned long)args);

	/*
	 * A return value of 0 here means that no messages were available
	 * in the message queue. In this case the native ioctl does not
	 * return any data to the application at all. Not even to update
	 * msgbufcount. This functionality needs to be kept here for
	 * compatibility.
	 *
	 * Of course, < 0 means that an error occurred and no data is being
	 * returned.
	 *
	 * Since count and msgbufcount were forced to 1, the only other
	 * possible return value is 1, meaning that 1 message was available,
	 * so the multiple-message case does not need to be handled here.
	 */
	if (ret <= 0)
		return ret;

	if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
		return -EFAULT;

	completion32.reason = completiontemp.reason;
	completion32.header = ptr_to_compat(completiontemp.header);
	completion32.service_userdata =
		ptr_to_compat(completiontemp.service_userdata);
	completion32.bulk_userdata =
		ptr_to_compat(completiontemp.bulk_userdata);

	if (copy_to_user(compat_ptr(args32.buf),
			 &completion32,
			 sizeof(completion32)))
		return -EFAULT;

	if (get_user(msgbufcount_native, &args->msgbufcount))
		return -EFAULT;

	if (!msgbufcount_native)
		args32.msgbufcount--;

	msgbufcount32 =
		&((struct vchiq_await_completion32 __user *)arg)->msgbufcount;

	if (copy_to_user(msgbufcount32,
			 &args32.msgbufcount,
			 sizeof(args32.msgbufcount)))
		return -EFAULT;

	return 1;
}

struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)

static long
vchiq_compat_ioctl_dequeue_message(struct file *file,
				   unsigned int cmd,
				   unsigned long arg)
{
	struct vchiq_dequeue_message __user *args;
	struct vchiq_dequeue_message32 args32;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_dequeue_message32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(args32.blocking, &args->blocking) ||
	    put_user(args32.bufsize, &args->bufsize) ||
	    put_user(compat_ptr(args32.buf), &args->buf))
		return -EFAULT;

	return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
			   (unsigned long)args);
}

struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)

static long
vchiq_compat_ioctl_get_config(struct file *file,
			      unsigned int cmd,
			      unsigned long arg)
{
	struct vchiq_get_config __user *args;
	struct vchiq_get_config32 args32;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_get_config32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.config_size, &args->config_size) ||
	    put_user(compat_ptr(args32.pconfig), &args->pconfig))
		return -EFAULT;

	return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
}

static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, arg);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, arg);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, arg);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, arg);
	default:
		return vchiq_ioctl(file, cmd, arg);
	}
}

#endif

static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	VCHIQ_INSTANCE_T instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}

static int vchiq_release(struct inode *inode, struct file *file)
{
	VCHIQ_INSTANCE_T instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		unlock_service(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

		spin_lock(&msg_queue_spinlock);

		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		unlock_service(service);
	}

	/* Release any closed services */
	while (instance->completion_remove !=
	       instance->completion_insert) {
		struct vchiq_completion_data *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
				service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			unlock_service(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	{
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				       "bulk_waiter - cleaned up %pK for pid %d",
				       waiter, waiter->pid);
			kfree(waiter);
		}
	}

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
2101 file->private_data = NULL;
2102
2103 out:
2104 return ret;
2105 }
2106
2107 /****************************************************************************
2108 *
2109 * vchiq_dump
2110 *
2111 ***************************************************************************/
2112
2113 void
2114 vchiq_dump(void *dump_context, const char *str, int len)
2115 {
2116 struct dump_context *context = (struct dump_context *)dump_context;
2117
2118 if (context->actual < context->space) {
2119 int copy_bytes;
2120
2121 if (context->offset > 0) {
2122 int skip_bytes = min(len, (int)context->offset);
2123
2124 str += skip_bytes;
2125 len -= skip_bytes;
2126 context->offset -= skip_bytes;
2127 if (context->offset > 0)
2128 return;
2129 }
2130 copy_bytes = min(len, (int)(context->space - context->actual));
2131 if (copy_bytes == 0)
2132 return;
		if (copy_to_user(context->buf + context->actual, str,
				 copy_bytes)) {
			context->actual = -EFAULT;
			return;
		}
		context->actual += copy_bytes;
2137 len -= copy_bytes;
2138
2139 		/* If the terminating NUL is included in the length, then it
2140 		** marks the end of a line and should be replaced with a
2141 		** newline character. */
2142 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
2143 char cr = '\n';
2144
2145 if (copy_to_user(context->buf + context->actual - 1,
2146 &cr, 1))
2147 context->actual = -EFAULT;
2148 }
2149 }
2150 }
2151
2152 /****************************************************************************
2153 *
2154 * vchiq_dump_platform_instance_state
2155 *
2156 ***************************************************************************/
2157
2158 void
2159 vchiq_dump_platform_instances(void *dump_context)
2160 {
2161 struct vchiq_state *state = vchiq_get_state();
2162 char buf[80];
2163 int len;
2164 int i;
2165
2166 	/* There is no list of instances, so instead scan all services,
2167 	 * marking those that have been dumped. */
2168
2169 for (i = 0; i < state->unused_service; i++) {
2170 struct vchiq_service *service = state->services[i];
2171 VCHIQ_INSTANCE_T instance;
2172
2173 if (service && (service->base.callback == service_callback)) {
2174 instance = service->instance;
2175 if (instance)
2176 instance->mark = 0;
2177 }
2178 }
2179
2180 for (i = 0; i < state->unused_service; i++) {
2181 struct vchiq_service *service = state->services[i];
2182 VCHIQ_INSTANCE_T instance;
2183
2184 if (service && (service->base.callback == service_callback)) {
2185 instance = service->instance;
2186 if (instance && !instance->mark) {
2187 				len = scnprintf(buf, sizeof(buf),
2188 "Instance %pK: pid %d,%s completions %d/%d",
2189 instance, instance->pid,
2190 instance->connected ? " connected, " :
2191 "",
2192 instance->completion_insert -
2193 instance->completion_remove,
2194 MAX_COMPLETIONS);
2195
2196 vchiq_dump(dump_context, buf, len + 1);
2197
2198 instance->mark = 1;
2199 }
2200 }
2201 }
2202 }
2203
2204 /****************************************************************************
2205 *
2206 * vchiq_dump_platform_service_state
2207 *
2208 ***************************************************************************/
2209
2210 void
2211 vchiq_dump_platform_service_state(void *dump_context,
2212 struct vchiq_service *service)
2213 {
2214 struct user_service *user_service =
2215 (struct user_service *)service->base.userdata;
2216 char buf[80];
2217 int len;
2218
2219 	len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
2220
2221 if ((service->base.callback == service_callback) &&
2222 user_service->is_vchi) {
2223 		len += scnprintf(buf + len, sizeof(buf) - len,
2224 ", %d/%d messages",
2225 user_service->msg_insert - user_service->msg_remove,
2226 MSG_QUEUE_SIZE);
2227
2228 if (user_service->dequeue_pending)
2229 			len += scnprintf(buf + len, sizeof(buf) - len,
2230 " (dequeue pending)");
2231 }
2232
2233 vchiq_dump(dump_context, buf, len + 1);
2234 }
2235
2236 /****************************************************************************
2237 *
2238 * vchiq_read
2239 *
2240 ***************************************************************************/
2241
2242 static ssize_t
2243 vchiq_read(struct file *file, char __user *buf,
2244 size_t count, loff_t *ppos)
2245 {
2246 struct dump_context context;
2247
2248 context.buf = buf;
2249 context.actual = 0;
2250 context.space = count;
2251 context.offset = *ppos;
2252
2253 vchiq_dump_state(&context, &g_state);
2254
	if ((ssize_t)context.actual > 0)
		*ppos += context.actual;
2256
2257 return context.actual;
2258 }
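
/*
 * Illustrative sketch only (an assumption about typical usage, not code
 * from this driver): successive successful reads advance *ppos, so the
 * whole state dump can be read incrementally from userspace, e.g.:
 *
 *	char buf[256];
 *	ssize_t n;
 *	int fd = open("/dev/vchiq", O_RDONLY);
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */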
2259
2260 struct vchiq_state *
2261 vchiq_get_state(void)
2262 {
2264 if (g_state.remote == NULL)
2265 		pr_err("%s: g_state.remote == NULL\n", __func__);
2266 else if (g_state.remote->initialised != 1)
2267 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
2268 			  __func__, g_state.remote->initialised);
2269
2270 return ((g_state.remote != NULL) &&
2271 (g_state.remote->initialised == 1)) ? &g_state : NULL;
2272 }
2273
2274 static const struct file_operations
2275 vchiq_fops = {
2276 .owner = THIS_MODULE,
2277 .unlocked_ioctl = vchiq_ioctl,
2278 #if defined(CONFIG_COMPAT)
2279 .compat_ioctl = vchiq_compat_ioctl,
2280 #endif
2281 .open = vchiq_open,
2282 .release = vchiq_release,
2283 .read = vchiq_read
2284 };
2285
2286 /*
2287 * Autosuspend related functionality
2288 */
2289
2290 int
2291 vchiq_videocore_wanted(struct vchiq_state *state)
2292 {
2293 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2294
2295 if (!arm_state)
2296 /* autosuspend not supported - always return wanted */
2297 return 1;
2298 	if (arm_state->blocked_count)
2299 		return 1;
2300 	if (!arm_state->videocore_use_count) {
2301 		/* usage count zero - check for override unless we're forcing */
2302 		if (arm_state->resume_blocked)
2303 			return 0;
2304 		return vchiq_platform_videocore_wanted(state);
2305 	}
2306 
2307 	/* non-zero usage count - videocore still required */
2308 	return 1;
2309 }
2310
2311 static VCHIQ_STATUS_T
2312 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
2313 struct vchiq_header *header,
2314 VCHIQ_SERVICE_HANDLE_T service_user,
2315 void *bulk_user)
2316 {
2317 vchiq_log_error(vchiq_susp_log_level,
2318 "%s callback reason %d", __func__, reason);
2319 	return VCHIQ_SUCCESS;
2320 }
2321
2322 static int
2323 vchiq_keepalive_thread_func(void *v)
2324 {
2325 struct vchiq_state *state = (struct vchiq_state *)v;
2326 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2327
2328 VCHIQ_STATUS_T status;
2329 VCHIQ_INSTANCE_T instance;
2330 VCHIQ_SERVICE_HANDLE_T ka_handle;
2331
2332 struct vchiq_service_params params = {
2333 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2334 .callback = vchiq_keepalive_vchiq_callback,
2335 .version = KEEPALIVE_VER,
2336 .version_min = KEEPALIVE_VER_MIN
2337 };
2338
2339 status = vchiq_initialise(&instance);
2340 if (status != VCHIQ_SUCCESS) {
2341 vchiq_log_error(vchiq_susp_log_level,
2342 "%s vchiq_initialise failed %d", __func__, status);
2343 goto exit;
2344 }
2345
2346 status = vchiq_connect(instance);
2347 if (status != VCHIQ_SUCCESS) {
2348 vchiq_log_error(vchiq_susp_log_level,
2349 "%s vchiq_connect failed %d", __func__, status);
2350 goto shutdown;
2351 }
2352
2353 status = vchiq_add_service(instance, &params, &ka_handle);
2354 if (status != VCHIQ_SUCCESS) {
2355 vchiq_log_error(vchiq_susp_log_level,
2356 "%s vchiq_open_service failed %d", __func__, status);
2357 goto shutdown;
2358 }
2359
2360 while (1) {
2361 long rc = 0, uc = 0;
2362
2363 if (wait_for_completion_killable(&arm_state->ka_evt)
2364 != 0) {
2365 vchiq_log_error(vchiq_susp_log_level,
2366 "%s interrupted", __func__);
2367 flush_signals(current);
2368 continue;
2369 }
2370
2371 /* read and clear counters. Do release_count then use_count to
2372 * prevent getting more releases than uses */
2373 rc = atomic_xchg(&arm_state->ka_release_count, 0);
2374 uc = atomic_xchg(&arm_state->ka_use_count, 0);
2375
2376 /* Call use/release service the requisite number of times.
2377 * Process use before release so use counts don't go negative */
2378 while (uc--) {
2379 atomic_inc(&arm_state->ka_use_ack_count);
2380 status = vchiq_use_service(ka_handle);
2381 if (status != VCHIQ_SUCCESS) {
2382 vchiq_log_error(vchiq_susp_log_level,
2383 "%s vchiq_use_service error %d",
2384 __func__, status);
2385 }
2386 }
2387 while (rc--) {
2388 status = vchiq_release_service(ka_handle);
2389 if (status != VCHIQ_SUCCESS) {
2390 vchiq_log_error(vchiq_susp_log_level,
2391 "%s vchiq_release_service error %d",
2392 __func__, status);
2393 }
2394 }
2395 }
2396
2397 shutdown:
2398 vchiq_shutdown(instance);
2399 exit:
2400 return 0;
2401 }
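
/*
 * Illustrative sketch only: the keepalive flow for a remote "use" event
 * (see vchiq_on_remote_use() later in this file) is, in effect:
 *
 *	atomic_inc(&arm_state->ka_use_count);		// slot-handler side
 *	complete(&arm_state->ka_evt);			// wake this thread
 *	...
 *	uc = atomic_xchg(&arm_state->ka_use_count, 0);	// keepalive thread
 *	while (uc--)
 *		vchiq_use_service(ka_handle);		// mirror it to videocore
 *
 * Remote "release" events take the same path via ka_release_count and
 * vchiq_release_service().
 */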
2402
2403 VCHIQ_STATUS_T
2404 vchiq_arm_init_state(struct vchiq_state *state,
2405 struct vchiq_arm_state *arm_state)
2406 {
2407 if (arm_state) {
2408 rwlock_init(&arm_state->susp_res_lock);
2409
2410 init_completion(&arm_state->ka_evt);
2411 atomic_set(&arm_state->ka_use_count, 0);
2412 atomic_set(&arm_state->ka_use_ack_count, 0);
2413 atomic_set(&arm_state->ka_release_count, 0);
2414
2415 init_completion(&arm_state->vc_suspend_complete);
2416
2417 init_completion(&arm_state->vc_resume_complete);
2418 /* Initialise to 'done' state. We only want to block on resume
2419 * completion while videocore is suspended. */
2420 set_resume_state(arm_state, VC_RESUME_RESUMED);
2421
2422 init_completion(&arm_state->resume_blocker);
2423 /* Initialise to 'done' state. We only want to block on this
2424 * completion while resume is blocked */
2425 complete_all(&arm_state->resume_blocker);
2426
2427 init_completion(&arm_state->blocked_blocker);
2428 /* Initialise to 'done' state. We only want to block on this
2429 * completion while things are waiting on the resume blocker */
2430 complete_all(&arm_state->blocked_blocker);
2431
2432 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
2433 arm_state->suspend_timer_running = 0;
2434 arm_state->state = state;
2435 timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
2436 0);
2437
2438 arm_state->first_connect = 0;
2439
2440 }
2441 return VCHIQ_SUCCESS;
2442 }
2443
2444 /*
2445 ** Functions to modify the state variables;
2446 ** set_suspend_state
2447 ** set_resume_state
2448 **
2449 ** There are more state variables than we might like, so ensure they remain in
2450 ** step. Suspend and resume state are maintained separately, since most of
2451 ** these state machines can operate independently. However, there are a few
2452 ** states where state transitions in one state machine cause a reset to the
2453 ** other state machine. In addition, there are some completion events which
2454 ** need to occur on state machine reset and end-state(s), so these are also
2455 ** dealt with in these functions.
2456 **
2457 ** In all states we set the state variable according to the input, but in some
2458 ** cases we perform additional steps outlined below;
2459 **
2460 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2461 ** The suspend completion is completed after any suspend
2462 ** attempt. When we reset the state machine we also reset
2463 ** the completion. This reset occurs when videocore is
2464 ** resumed, and also if we initiate suspend after a suspend
2465 ** failure.
2466 **
2467 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2468 ** suspend - ie from this point on we must try to suspend
2469 ** before resuming can occur. We therefore also reset the
2470 ** resume state machine to VC_RESUME_IDLE in this state.
2471 **
2472 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2473 ** complete_all on the suspend completion to notify
2474 ** anything waiting for suspend to happen.
2475 **
2476 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2477 ** initiate resume, so no need to alter resume state.
2478 ** We call complete_all on the suspend completion to notify
2479 ** of suspend rejection.
2480 **
2481 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
2482 ** suspend completion and reset the resume state machine.
2483 **
2484 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
2485 **			resume completion is in its 'done' state whenever
2486 **			videocore is running. Therefore, the VC_RESUME_IDLE
2487 ** state implies that videocore is suspended.
2488 ** Hence, any thread which needs to wait until videocore is
2489 ** running can wait on this completion - it will only block
2490 ** if videocore is suspended.
2491 **
2492 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
2493 ** Call complete_all on the resume completion to unblock
2494 ** any threads waiting for resume. Also reset the suspend
2495 **			state machine to its idle state.
2496 **
2497 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2498 */
2499
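/*
 * Illustrative sketch only: a typical successful suspend/resume cycle
 * walks the two state machines as follows, each transition made with
 * susp_res_lock held for writing:
 *
 *	set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
 *	set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
 *		// resets the resume state machine to VC_RESUME_IDLE
 *	set_suspend_state(arm_state, VC_SUSPEND_SUSPENDED);
 *		// completes vc_suspend_complete
 *	...
 *	set_resume_state(arm_state, VC_RESUME_REQUESTED);
 *	set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
 *	set_resume_state(arm_state, VC_RESUME_RESUMED);
 *		// completes vc_resume_complete, resets suspend to VC_SUSPEND_IDLE
 */
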
2500 void
2501 set_suspend_state(struct vchiq_arm_state *arm_state,
2502 enum vc_suspend_status new_state)
2503 {
2504 /* set the state in all cases */
2505 arm_state->vc_suspend_state = new_state;
2506
2507 /* state specific additional actions */
2508 switch (new_state) {
2509 case VC_SUSPEND_FORCE_CANCELED:
2510 complete_all(&arm_state->vc_suspend_complete);
2511 break;
2512 case VC_SUSPEND_REJECTED:
2513 complete_all(&arm_state->vc_suspend_complete);
2514 break;
2515 case VC_SUSPEND_FAILED:
2516 complete_all(&arm_state->vc_suspend_complete);
2517 arm_state->vc_resume_state = VC_RESUME_RESUMED;
2518 complete_all(&arm_state->vc_resume_complete);
2519 break;
2520 case VC_SUSPEND_IDLE:
2521 reinit_completion(&arm_state->vc_suspend_complete);
2522 break;
2523 case VC_SUSPEND_REQUESTED:
2524 break;
2525 case VC_SUSPEND_IN_PROGRESS:
2526 set_resume_state(arm_state, VC_RESUME_IDLE);
2527 break;
2528 case VC_SUSPEND_SUSPENDED:
2529 complete_all(&arm_state->vc_suspend_complete);
2530 break;
2531 default:
2532 BUG();
2533 break;
2534 }
2535 }
2536
2537 void
2538 set_resume_state(struct vchiq_arm_state *arm_state,
2539 enum vc_resume_status new_state)
2540 {
2541 /* set the state in all cases */
2542 arm_state->vc_resume_state = new_state;
2543
2544 /* state specific additional actions */
2545 switch (new_state) {
2546 case VC_RESUME_FAILED:
2547 break;
2548 case VC_RESUME_IDLE:
2549 reinit_completion(&arm_state->vc_resume_complete);
2550 break;
2551 case VC_RESUME_REQUESTED:
2552 break;
2553 case VC_RESUME_IN_PROGRESS:
2554 break;
2555 case VC_RESUME_RESUMED:
2556 complete_all(&arm_state->vc_resume_complete);
2557 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2558 break;
2559 default:
2560 BUG();
2561 break;
2562 }
2563 }
2564
2565 /* should be called with the write lock held */
2566 static inline void
2567 start_suspend_timer(struct vchiq_arm_state *arm_state)
2568 {
2569 del_timer(&arm_state->suspend_timer);
2570 arm_state->suspend_timer.expires = jiffies +
2571 msecs_to_jiffies(arm_state->suspend_timer_timeout);
2572 add_timer(&arm_state->suspend_timer);
2573 arm_state->suspend_timer_running = 1;
2574 }
2575
2576 /* should be called with the write lock held */
2577 static inline void
2578 stop_suspend_timer(struct vchiq_arm_state *arm_state)
2579 {
2580 if (arm_state->suspend_timer_running) {
2581 del_timer(&arm_state->suspend_timer);
2582 arm_state->suspend_timer_running = 0;
2583 }
2584 }
2585
2586 static inline int
2587 need_resume(struct vchiq_state *state)
2588 {
2589 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2590
2591 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
2592 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
2593 vchiq_videocore_wanted(state);
2594 }
2595
2596 static int
2597 block_resume(struct vchiq_arm_state *arm_state)
2598 {
2599 int status = VCHIQ_SUCCESS;
2600 const unsigned long timeout_val =
2601 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
2602 int resume_count = 0;
2603
2604 /* Allow any threads which were blocked by the last force suspend to
2605 * complete if they haven't already. Only give this one shot; if
2606 * blocked_count is incremented after blocked_blocker is completed
2607 * (which only happens when blocked_count hits 0) then those threads
2608 * will have to wait until next time around */
2609 if (arm_state->blocked_count) {
2610 reinit_completion(&arm_state->blocked_blocker);
2611 write_unlock_bh(&arm_state->susp_res_lock);
2612 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
2613 "blocked clients", __func__);
2614 if (wait_for_completion_killable_timeout(
2615 &arm_state->blocked_blocker, timeout_val)
2616 <= 0) {
2617 vchiq_log_error(vchiq_susp_log_level, "%s wait for "
2618 "previously blocked clients failed", __func__);
2619 status = VCHIQ_ERROR;
2620 write_lock_bh(&arm_state->susp_res_lock);
2621 goto out;
2622 }
2623 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
2624 "clients resumed", __func__);
2625 write_lock_bh(&arm_state->susp_res_lock);
2626 }
2627
2628 /* We need to wait for resume to complete if it's in process */
2629 while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
2630 arm_state->vc_resume_state > VC_RESUME_IDLE) {
2631 if (resume_count > 1) {
2632 status = VCHIQ_ERROR;
2633 vchiq_log_error(vchiq_susp_log_level, "%s waited too "
2634 "many times for resume", __func__);
2635 goto out;
2636 }
2637 write_unlock_bh(&arm_state->susp_res_lock);
2638 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
2639 __func__);
2640 if (wait_for_completion_killable_timeout(
2641 &arm_state->vc_resume_complete, timeout_val)
2642 <= 0) {
2643 vchiq_log_error(vchiq_susp_log_level, "%s wait for "
2644 "resume failed (%s)", __func__,
2645 resume_state_names[arm_state->vc_resume_state +
2646 VC_RESUME_NUM_OFFSET]);
2647 status = VCHIQ_ERROR;
2648 write_lock_bh(&arm_state->susp_res_lock);
2649 goto out;
2650 }
2651 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
2652 write_lock_bh(&arm_state->susp_res_lock);
2653 resume_count++;
2654 }
2655 reinit_completion(&arm_state->resume_blocker);
2656 arm_state->resume_blocked = 1;
2657
2658 out:
2659 return status;
2660 }
2661
2662 static inline void
2663 unblock_resume(struct vchiq_arm_state *arm_state)
2664 {
2665 complete_all(&arm_state->resume_blocker);
2666 arm_state->resume_blocked = 0;
2667 }
2668
2669 /* Initiate suspend via slot handler. Should be called with the write lock
2670 * held */
2671 VCHIQ_STATUS_T
2672 vchiq_arm_vcsuspend(struct vchiq_state *state)
2673 {
2674 VCHIQ_STATUS_T status = VCHIQ_ERROR;
2675 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2676
2677 if (!arm_state)
2678 goto out;
2679
2680 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2681 status = VCHIQ_SUCCESS;
2682
2683 switch (arm_state->vc_suspend_state) {
2684 case VC_SUSPEND_REQUESTED:
2685 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
2686 "requested", __func__);
2687 break;
2688 case VC_SUSPEND_IN_PROGRESS:
2689 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
2690 "progress", __func__);
2691 break;
2692
2693 default:
2694 /* We don't expect to be in other states, so log but continue
2695 * anyway */
2696 vchiq_log_error(vchiq_susp_log_level,
2697 "%s unexpected suspend state %s", __func__,
2698 suspend_state_names[arm_state->vc_suspend_state +
2699 VC_SUSPEND_NUM_OFFSET]);
2700 /* fall through */
2701 case VC_SUSPEND_REJECTED:
2702 case VC_SUSPEND_FAILED:
2703 /* Ensure any idle state actions have been run */
2704 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2705 /* fall through */
2706 case VC_SUSPEND_IDLE:
2707 vchiq_log_info(vchiq_susp_log_level,
2708 "%s: suspending", __func__);
2709 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2710 /* kick the slot handler thread to initiate suspend */
2711 request_poll(state, NULL, 0);
2712 break;
2713 }
2714
2715 out:
2716 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2717 return status;
2718 }
2719
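/*
 * Illustrative sketch only: since vchiq_arm_vcsuspend() requires
 * susp_res_lock to be held for writing, callers follow the pattern used
 * by vchiq_check_suspend() below:
 *
 *	write_lock_bh(&arm_state->susp_res_lock);
 *	status = vchiq_arm_vcsuspend(state);
 *	write_unlock_bh(&arm_state->susp_res_lock);
 */
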
2720 void
2721 vchiq_platform_check_suspend(struct vchiq_state *state)
2722 {
2723 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2724 int susp = 0;
2725
2726 if (!arm_state)
2727 goto out;
2728
2729 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2730
2731 write_lock_bh(&arm_state->susp_res_lock);
2732 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2733 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2734 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2735 susp = 1;
2736 }
2737 write_unlock_bh(&arm_state->susp_res_lock);
2738
2739 if (susp)
2740 vchiq_platform_suspend(state);
2741
2742 out:
2743 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2744 return;
2745 }
2746
2747 static void
2748 output_timeout_error(struct vchiq_state *state)
2749 {
2750 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2751 char err[50] = "";
2752 int vc_use_count = arm_state->videocore_use_count;
2753 int active_services = state->unused_service;
2754 int i;
2755
2756 if (!arm_state->videocore_use_count) {
2757 snprintf(err, sizeof(err), " Videocore usecount is 0");
2758 goto output_msg;
2759 }
2760 for (i = 0; i < active_services; i++) {
2761 struct vchiq_service *service_ptr = state->services[i];
2762
2763 if (service_ptr && service_ptr->service_use_count &&
2764 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2765 snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2766 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2767 service_ptr->base.fourcc),
2768 service_ptr->client_id,
2769 service_ptr->service_use_count,
2770 service_ptr->service_use_count ==
2771 vc_use_count ? "" : " (+ more)");
2772 break;
2773 }
2774 }
2775
2776 output_msg:
2777 vchiq_log_error(vchiq_susp_log_level,
2778 "timed out waiting for vc suspend (%d).%s",
2779 arm_state->autosuspend_override, err);
2780
2781 }
2782
2783 /* Try to get videocore into suspended state, regardless of autosuspend state.
2784 ** We don't actually force suspend, since videocore may get into a bad state
2785 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
2786 ** determine a good point to suspend. If this doesn't happen within 100ms we
2787 ** report failure.
2788 **
2789 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2790 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2791 */
2792 VCHIQ_STATUS_T
2793 vchiq_arm_force_suspend(struct vchiq_state *state)
2794 {
2795 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2796 VCHIQ_STATUS_T status = VCHIQ_ERROR;
2797 long rc = 0;
2798 int repeat = -1;
2799
2800 if (!arm_state)
2801 goto out;
2802
2803 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2804
2805 write_lock_bh(&arm_state->susp_res_lock);
2806
2807 status = block_resume(arm_state);
2808 if (status != VCHIQ_SUCCESS)
2809 goto unlock;
2810 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2811 /* Already suspended - just block resume and exit */
2812 vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
2813 __func__);
2814 status = VCHIQ_SUCCESS;
2815 goto unlock;
2816 } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
2817 /* initiate suspend immediately in the case that we're waiting
2818 * for the timeout */
2819 stop_suspend_timer(arm_state);
2820 if (!vchiq_videocore_wanted(state)) {
2821 vchiq_log_info(vchiq_susp_log_level, "%s videocore "
2822 "idle, initiating suspend", __func__);
2823 status = vchiq_arm_vcsuspend(state);
2824 } else if (arm_state->autosuspend_override <
2825 FORCE_SUSPEND_FAIL_MAX) {
2826 vchiq_log_info(vchiq_susp_log_level, "%s letting "
2827 "videocore go idle", __func__);
2828 status = VCHIQ_SUCCESS;
2829 } else {
2830 vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
2831 "many times - attempting suspend", __func__);
2832 status = vchiq_arm_vcsuspend(state);
2833 }
2834 } else {
2835 vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
2836 "in progress - wait for completion", __func__);
2837 status = VCHIQ_SUCCESS;
2838 }
2839
2840 	/* Wait for suspend to happen due to system idle (not forced) */
2841 if (status != VCHIQ_SUCCESS)
2842 goto unblock_resume;
2843
2844 do {
2845 write_unlock_bh(&arm_state->susp_res_lock);
2846
2847 rc = wait_for_completion_killable_timeout(
2848 &arm_state->vc_suspend_complete,
2849 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
2850
2851 write_lock_bh(&arm_state->susp_res_lock);
2852 if (rc < 0) {
2853 vchiq_log_warning(vchiq_susp_log_level, "%s "
2854 "interrupted waiting for suspend", __func__);
2855 status = VCHIQ_ERROR;
2856 goto unblock_resume;
2857 } else if (rc == 0) {
2858 if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
2859 /* Repeat timeout once if in progress */
2860 if (repeat < 0) {
2861 repeat = 1;
2862 continue;
2863 }
2864 }
2865 arm_state->autosuspend_override++;
2866 output_timeout_error(state);
2867
2868 status = VCHIQ_RETRY;
2869 goto unblock_resume;
2870 }
2871 } while (0 < (repeat--));
2872
2873 /* Check and report state in case we need to abort ARM suspend */
2874 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
2875 status = VCHIQ_RETRY;
2876 vchiq_log_error(vchiq_susp_log_level,
2877 "%s videocore suspend failed (state %s)", __func__,
2878 suspend_state_names[arm_state->vc_suspend_state +
2879 VC_SUSPEND_NUM_OFFSET]);
2880 /* Reset the state only if it's still in an error state.
2881 * Something could have already initiated another suspend. */
2882 if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
2883 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2884
2885 goto unblock_resume;
2886 }
2887
2888 /* successfully suspended - unlock and exit */
2889 goto unlock;
2890
2891 unblock_resume:
2892 /* all error states need to unblock resume before exit */
2893 unblock_resume(arm_state);
2894
2895 unlock:
2896 write_unlock_bh(&arm_state->susp_res_lock);
2897
2898 out:
2899 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2900 return status;
2901 }
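
/*
 * Illustrative sketch only: vchiq_platform_suspend_hook() is a
 * hypothetical caller, not part of this file, showing how the return
 * values documented above might be mapped to errnos by platform code:
 *
 *	static int vchiq_platform_suspend_hook(struct vchiq_state *state)
 *	{
 *		switch (vchiq_arm_force_suspend(state)) {
 *		case VCHIQ_SUCCESS:
 *			return 0;	// suspended; resume stays blocked
 *		case VCHIQ_RETRY:
 *			return -EAGAIN;	// videocore not idle in time
 *		default:
 *			return -EINTR;	// interrupted
 *		}
 *	}
 *
 * On success, resume remains blocked until vchiq_arm_allow_resume().
 */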
2902
2903 void
2904 vchiq_check_suspend(struct vchiq_state *state)
2905 {
2906 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2907
2908 if (!arm_state)
2909 goto out;
2910
2911 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2912
2913 write_lock_bh(&arm_state->susp_res_lock);
2914 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2915 arm_state->first_connect &&
2916 !vchiq_videocore_wanted(state)) {
2917 vchiq_arm_vcsuspend(state);
2918 }
2919 write_unlock_bh(&arm_state->susp_res_lock);
2920
2921 out:
2922 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2923 }
2924
2925 int
2926 vchiq_arm_allow_resume(struct vchiq_state *state)
2927 {
2928 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2929 int resume = 0;
2930 int ret = -1;
2931
2932 if (!arm_state)
2933 goto out;
2934
2935 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2936
2937 write_lock_bh(&arm_state->susp_res_lock);
2938 unblock_resume(arm_state);
2939 resume = vchiq_check_resume(state);
2940 write_unlock_bh(&arm_state->susp_res_lock);
2941
2942 if (resume) {
2943 if (wait_for_completion_killable(
2944 &arm_state->vc_resume_complete) < 0) {
2945 vchiq_log_error(vchiq_susp_log_level,
2946 "%s interrupted", __func__);
2947 /* failed, cannot accurately derive suspend
2948 * state, so exit early. */
2949 goto out;
2950 }
2951 }
2952
2953 read_lock_bh(&arm_state->susp_res_lock);
2954 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2955 vchiq_log_info(vchiq_susp_log_level,
2956 "%s: Videocore remains suspended", __func__);
2957 } else {
2958 vchiq_log_info(vchiq_susp_log_level,
2959 "%s: Videocore resumed", __func__);
2960 ret = 0;
2961 }
2962 read_unlock_bh(&arm_state->susp_res_lock);
2963 out:
2964 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2965 return ret;
2966 }
2967
2968 /* This function should be called with the write lock held */
2969 int
2970 vchiq_check_resume(struct vchiq_state *state)
2971 {
2972 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2973 int resume = 0;
2974
2975 if (!arm_state)
2976 goto out;
2977
2978 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2979
2980 if (need_resume(state)) {
2981 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2982 request_poll(state, NULL, 0);
2983 resume = 1;
2984 }
2985
2986 out:
2987 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2988 return resume;
2989 }
2990
2991 VCHIQ_STATUS_T
2992 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2993 enum USE_TYPE_E use_type)
2994 {
2995 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2996 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2997 char entity[16];
2998 int *entity_uc;
2999 int local_uc, local_entity_uc;
3000
3001 if (!arm_state)
3002 goto out;
3003
3004 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3005
3006 if (use_type == USE_TYPE_VCHIQ) {
3007 		snprintf(entity, sizeof(entity), "VCHIQ: ");
3008 entity_uc = &arm_state->peer_use_count;
3009 } else if (service) {
3010 		snprintf(entity, sizeof(entity), "%c%c%c%c:%03d",
3011 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3012 service->client_id);
3013 entity_uc = &service->service_use_count;
3014 } else {
3015 vchiq_log_error(vchiq_susp_log_level, "%s null service "
3016 "ptr", __func__);
3017 ret = VCHIQ_ERROR;
3018 goto out;
3019 }
3020
3021 write_lock_bh(&arm_state->susp_res_lock);
3022 while (arm_state->resume_blocked) {
3023 /* If we call 'use' while force suspend is waiting for suspend,
3024 * then we're about to block the thread which the force is
3025 * waiting to complete, so we're bound to just time out. In this
3026 * case, set the suspend state such that the wait will be
3027 * canceled, so we can complete as quickly as possible. */
3028 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
3029 VC_SUSPEND_IDLE) {
3030 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
3031 break;
3032 }
3033 /* If suspend is already in progress then we need to block */
3034 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
3035 /* Indicate that there are threads waiting on the resume
3036 * blocker. These need to be allowed to complete before
3037 * a _second_ call to force suspend can complete,
3038 * otherwise low priority threads might never actually
3039 * continue */
3040 arm_state->blocked_count++;
3041 write_unlock_bh(&arm_state->susp_res_lock);
3042 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
3043 "blocked - waiting...", __func__, entity);
3044 if (wait_for_completion_killable(
3045 &arm_state->resume_blocker) != 0) {
3046 vchiq_log_error(vchiq_susp_log_level, "%s %s "
3047 "wait for resume blocker interrupted",
3048 __func__, entity);
3049 ret = VCHIQ_ERROR;
3050 write_lock_bh(&arm_state->susp_res_lock);
3051 arm_state->blocked_count--;
3052 write_unlock_bh(&arm_state->susp_res_lock);
3053 goto out;
3054 }
3055 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
3056 "unblocked", __func__, entity);
3057 write_lock_bh(&arm_state->susp_res_lock);
3058 if (--arm_state->blocked_count == 0)
3059 complete_all(&arm_state->blocked_blocker);
3060 }
3061 }
3062
3063 stop_suspend_timer(arm_state);
3064
3065 local_uc = ++arm_state->videocore_use_count;
3066 local_entity_uc = ++(*entity_uc);
3067
3068 /* If there's a pending request which hasn't yet been serviced then
3069 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
3070 * vc_resume_complete will block until we either resume or fail to
3071 * suspend */
3072 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
3073 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3074
3075 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
3076 set_resume_state(arm_state, VC_RESUME_REQUESTED);
3077 vchiq_log_info(vchiq_susp_log_level,
3078 "%s %s count %d, state count %d",
3079 __func__, entity, local_entity_uc, local_uc);
3080 request_poll(state, NULL, 0);
3081 } else
3082 vchiq_log_trace(vchiq_susp_log_level,
3083 "%s %s count %d, state count %d",
3084 __func__, entity, *entity_uc, local_uc);
3085
3086 write_unlock_bh(&arm_state->susp_res_lock);
3087
3088 /* Completion is in a done state when we're not suspended, so this won't
3089 * block for the non-suspended case. */
3090 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
3091 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
3092 __func__, entity);
3093 if (wait_for_completion_killable(
3094 &arm_state->vc_resume_complete) != 0) {
3095 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
3096 "resume interrupted", __func__, entity);
3097 ret = VCHIQ_ERROR;
3098 goto out;
3099 }
3100 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
3101 entity);
3102 }
3103
3104 if (ret == VCHIQ_SUCCESS) {
3105 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3106 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
3107
3108 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
3109 /* Send the use notify to videocore */
3110 status = vchiq_send_remote_use_active(state);
3111 if (status == VCHIQ_SUCCESS)
3112 ack_cnt--;
3113 else
3114 atomic_add(ack_cnt,
3115 &arm_state->ka_use_ack_count);
3116 }
3117 }
3118
3119 out:
3120 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
3121 return ret;
3122 }
3123
3124 VCHIQ_STATUS_T
3125 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
3126 {
3127 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3128 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
3129 char entity[16];
3130 int *entity_uc;
3131 int local_uc, local_entity_uc;
3132
3133 if (!arm_state)
3134 goto out;
3135
3136 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3137
3138 if (service) {
3139 		snprintf(entity, sizeof(entity), "%c%c%c%c:%03d",
3140 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3141 service->client_id);
3142 entity_uc = &service->service_use_count;
3143 } else {
3144 		snprintf(entity, sizeof(entity), "PEER: ");
3145 entity_uc = &arm_state->peer_use_count;
3146 }
3147
3148 write_lock_bh(&arm_state->susp_res_lock);
3149 if (!arm_state->videocore_use_count || !(*entity_uc)) {
3150 /* Don't use BUG_ON - don't allow user thread to crash kernel */
3151 WARN_ON(!arm_state->videocore_use_count);
3152 WARN_ON(!(*entity_uc));
3153 ret = VCHIQ_ERROR;
3154 goto unlock;
3155 }
3156 local_uc = --arm_state->videocore_use_count;
3157 local_entity_uc = --(*entity_uc);
3158
3159 if (!vchiq_videocore_wanted(state)) {
3160 if (vchiq_platform_use_suspend_timer() &&
3161 !arm_state->resume_blocked) {
3162 /* Only use the timer if we're not trying to force
3163 * suspend (=> resume_blocked) */
3164 start_suspend_timer(arm_state);
3165 } else {
3166 vchiq_log_info(vchiq_susp_log_level,
3167 "%s %s count %d, state count %d - suspending",
3168 __func__, entity, *entity_uc,
3169 arm_state->videocore_use_count);
3170 vchiq_arm_vcsuspend(state);
3171 }
3172 } else
3173 vchiq_log_trace(vchiq_susp_log_level,
3174 "%s %s count %d, state count %d",
3175 __func__, entity, *entity_uc,
3176 arm_state->videocore_use_count);
3177
3178 unlock:
3179 write_unlock_bh(&arm_state->susp_res_lock);
3180
3181 out:
3182 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
3183 return ret;
3184 }
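
/*
 * Illustrative sketch only: clients bracket periods where videocore must
 * stay awake with balanced use/release calls, e.g. (hypothetical client
 * code):
 *
 *	if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
 *		// ... queue messages / bulk transfers ...
 *		vchiq_release_service(handle);
 *	}
 *
 * An unbalanced release is rejected above with WARN_ON() rather than
 * BUG(), since a misbehaving user thread must not crash the kernel.
 */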
3185
3186 void
3187 vchiq_on_remote_use(struct vchiq_state *state)
3188 {
3189 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3190
3191 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3192 atomic_inc(&arm_state->ka_use_count);
3193 complete(&arm_state->ka_evt);
3194 }
3195
3196 void
3197 vchiq_on_remote_release(struct vchiq_state *state)
3198 {
3199 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3200
3201 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3202 atomic_inc(&arm_state->ka_release_count);
3203 complete(&arm_state->ka_evt);
3204 }
3205
3206 VCHIQ_STATUS_T
3207 vchiq_use_service_internal(struct vchiq_service *service)
3208 {
3209 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
3210 }
3211
3212 VCHIQ_STATUS_T
3213 vchiq_release_service_internal(struct vchiq_service *service)
3214 {
3215 return vchiq_release_internal(service->state, service);
3216 }
3217
3218 struct vchiq_debugfs_node *
3219 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
3220 {
3221 return &instance->debugfs_node;
3222 }
3223
3224 int
3225 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
3226 {
3227 struct vchiq_service *service;
3228 int use_count = 0, i;
3229
3230 i = 0;
3231 while ((service = next_service_by_instance(instance->state,
3232 instance, &i)) != NULL) {
3233 use_count += service->service_use_count;
3234 unlock_service(service);
3235 }
3236 return use_count;
3237 }
3238
3239 int
3240 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
3241 {
3242 return instance->pid;
3243 }
3244
3245 int
3246 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
3247 {
3248 return instance->trace;
3249 }
3250
3251 void
3252 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
3253 {
3254 struct vchiq_service *service;
3255 int i;
3256
3257 i = 0;
3258 while ((service = next_service_by_instance(instance->state,
3259 instance, &i)) != NULL) {
3260 service->trace = trace;
3261 unlock_service(service);
3262 }
3263 instance->trace = (trace != 0);
3264 }
3265
3266 static void suspend_timer_callback(struct timer_list *t)
3267 {
3268 struct vchiq_arm_state *arm_state =
3269 from_timer(arm_state, t, suspend_timer);
3270 struct vchiq_state *state = arm_state->state;
3271
3272 vchiq_log_info(vchiq_susp_log_level,
3273 "%s - suspend timer expired - check suspend", __func__);
3274 vchiq_check_suspend(state);
3275 }
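
/*
 * Illustrative sketch only: the autosuspend timer ties the pieces above
 * together. The final release arms the timer; if no new use arrives
 * within suspend_timer_timeout ms the callback fires and suspend is
 * initiated, provided nothing still wants videocore:
 *
 *	vchiq_release_internal() -> start_suspend_timer()
 *		-> suspend_timer_callback() (after timeout)
 *		-> vchiq_check_suspend() -> vchiq_arm_vcsuspend()
 */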
3276
3277 VCHIQ_STATUS_T
3278 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
3279 {
3280 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3281 struct vchiq_service *service = find_service_by_handle(handle);
3282
3283 if (service) {
3284 ret = vchiq_use_internal(service->state, service,
3285 USE_TYPE_SERVICE_NO_RESUME);
3286 unlock_service(service);
3287 }
3288 return ret;
3289 }
3290
3291 VCHIQ_STATUS_T
3292 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
3293 {
3294 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3295 struct vchiq_service *service = find_service_by_handle(handle);
3296
3297 if (service) {
3298 ret = vchiq_use_internal(service->state, service,
3299 USE_TYPE_SERVICE);
3300 unlock_service(service);
3301 }
3302 return ret;
3303 }
3304
3305 VCHIQ_STATUS_T
3306 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
3307 {
3308 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3309 struct vchiq_service *service = find_service_by_handle(handle);
3310
3311 if (service) {
3312 ret = vchiq_release_internal(service->state, service);
3313 unlock_service(service);
3314 }
3315 return ret;
3316 }
3317
3318 struct service_data_struct {
3319 int fourcc;
3320 int clientid;
3321 int use_count;
3322 };
3323
3324 void
3325 vchiq_dump_service_use_state(struct vchiq_state *state)
3326 {
3327 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3328 struct service_data_struct *service_data;
3329 int i, found = 0;
3330 /* If there's more than 64 services, only dump ones with
3331 * non-zero counts */
3332 int only_nonzero = 0;
3333 static const char *nz = "<-- preventing suspend";
3334
3335 enum vc_suspend_status vc_suspend_state;
3336 enum vc_resume_status vc_resume_state;
3337 int peer_count;
3338 int vc_use_count;
3339 int active_services;
3340
3341 if (!arm_state)
3342 return;
3343
3344 service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
3345 GFP_KERNEL);
3346 if (!service_data)
3347 return;
3348
3349 read_lock_bh(&arm_state->susp_res_lock);
3350 vc_suspend_state = arm_state->vc_suspend_state;
3351 vc_resume_state = arm_state->vc_resume_state;
3352 peer_count = arm_state->peer_use_count;
3353 vc_use_count = arm_state->videocore_use_count;
3354 active_services = state->unused_service;
3355 if (active_services > MAX_SERVICES)
3356 only_nonzero = 1;
3357
3358 for (i = 0; i < active_services; i++) {
3359 struct vchiq_service *service_ptr = state->services[i];
3360
3361 if (!service_ptr)
3362 continue;
3363
3364 if (only_nonzero && !service_ptr->service_use_count)
3365 continue;
3366
3367 if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
3368 continue;
3369
3370 service_data[found].fourcc = service_ptr->base.fourcc;
3371 service_data[found].clientid = service_ptr->client_id;
3372 service_data[found].use_count = service_ptr->service_use_count;
3373 found++;
3374 if (found >= MAX_SERVICES)
3375 break;
3376 }
3377
3378 read_unlock_bh(&arm_state->susp_res_lock);
3379
3380 vchiq_log_warning(vchiq_susp_log_level,
3381 		"-- Videocore suspend state: %s --",
3382 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
3383 vchiq_log_warning(vchiq_susp_log_level,
3384 		"-- Videocore resume state: %s --",
3385 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
3386
3387 if (only_nonzero)
3388 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
3389 "services (%d). Only dumping up to first %d services "
3390 "with non-zero use-count", active_services, found);
3391
3392 for (i = 0; i < found; i++) {
3393 vchiq_log_warning(vchiq_susp_log_level,
3394 "----- %c%c%c%c:%d service count %d %s",
3395 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
3396 service_data[i].clientid,
3397 service_data[i].use_count,
3398 service_data[i].use_count ? nz : "");
3399 }
3400 vchiq_log_warning(vchiq_susp_log_level,
3401 		"----- VCHIQ use count %d", peer_count);
3402 vchiq_log_warning(vchiq_susp_log_level,
3403 "--- Overall vchiq instance use count %d", vc_use_count);
3404
3405 kfree(service_data);
3406
3407 vchiq_dump_platform_use_state(state);
3408 }
3409
3410 VCHIQ_STATUS_T
3411 vchiq_check_service(struct vchiq_service *service)
3412 {
3413 struct vchiq_arm_state *arm_state;
3414 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3415
3416 if (!service || !service->state)
3417 goto out;
3418
3419 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3420
3421 arm_state = vchiq_platform_get_arm_state(service->state);
3422
3423 read_lock_bh(&arm_state->susp_res_lock);
3424 if (service->service_use_count)
3425 ret = VCHIQ_SUCCESS;
3426 read_unlock_bh(&arm_state->susp_res_lock);
3427
3428 if (ret == VCHIQ_ERROR) {
3429 vchiq_log_error(vchiq_susp_log_level,
3430 "%s ERROR - %c%c%c%c:%d service count %d, "
3431 "state count %d, videocore suspend state %s", __func__,
3432 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3433 service->client_id, service->service_use_count,
3434 arm_state->videocore_use_count,
3435 suspend_state_names[arm_state->vc_suspend_state +
3436 VC_SUSPEND_NUM_OFFSET]);
3437 vchiq_dump_service_use_state(service->state);
3438 }
3439 out:
3440 return ret;
3441 }
3442
3443 /* stub functions */
3444 void vchiq_on_remote_use_active(struct vchiq_state *state)
3445 {
3446 (void)state;
3447 }
3448
3449 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
3450 VCHIQ_CONNSTATE_T oldstate,
3451 VCHIQ_CONNSTATE_T newstate)
3452 {
3453 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3454
3455 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
3456 get_conn_state_name(oldstate), get_conn_state_name(newstate));
3457 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
3458 write_lock_bh(&arm_state->susp_res_lock);
3459 if (!arm_state->first_connect) {
3460 char threadname[16];
3461
3462 arm_state->first_connect = 1;
3463 write_unlock_bh(&arm_state->susp_res_lock);
3464 snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
3465 state->id);
3466 arm_state->ka_thread = kthread_create(
3467 &vchiq_keepalive_thread_func,
3468 (void *)state,
3469 threadname);
3470 if (IS_ERR(arm_state->ka_thread)) {
3471 vchiq_log_error(vchiq_susp_log_level,
3472 "vchiq: FATAL: couldn't create thread %s",
3473 threadname);
3474 } else {
3475 wake_up_process(arm_state->ka_thread);
3476 }
3477 } else
3478 write_unlock_bh(&arm_state->susp_res_lock);
3479 }
3480 }
3481
3482 static const struct of_device_id vchiq_of_match[] = {
3483 { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
3484 { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
3485 {},
3486 };
3487 MODULE_DEVICE_TABLE(of, vchiq_of_match);
3488
3489 static struct platform_device *
3490 vchiq_register_child(struct platform_device *pdev, const char *name)
3491 {
3492 struct platform_device_info pdevinfo;
3493 struct platform_device *child;
3494
3495 memset(&pdevinfo, 0, sizeof(pdevinfo));
3496
3497 pdevinfo.parent = &pdev->dev;
3498 pdevinfo.name = name;
3499 pdevinfo.id = PLATFORM_DEVID_NONE;
3500 pdevinfo.dma_mask = DMA_BIT_MASK(32);
3501
3502 child = platform_device_register_full(&pdevinfo);
3503 if (IS_ERR(child)) {
3504 dev_warn(&pdev->dev, "%s not registered\n", name);
3505 child = NULL;
3506 }
3507
3508 return child;
3509 }
3510
3511 static int vchiq_probe(struct platform_device *pdev)
3512 {
3513 struct device_node *fw_node;
3514 const struct of_device_id *of_id;
3515 struct vchiq_drvdata *drvdata;
3516 struct device *vchiq_dev;
3517 int err;
3518
3519 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
3520 drvdata = (struct vchiq_drvdata *)of_id->data;
3521 if (!drvdata)
3522 return -EINVAL;
3523
3524 fw_node = of_find_compatible_node(NULL, NULL,
3525 "raspberrypi,bcm2835-firmware");
3526 if (!fw_node) {
3527 dev_err(&pdev->dev, "Missing firmware node\n");
3528 return -ENOENT;
3529 }
3530
3531 drvdata->fw = rpi_firmware_get(fw_node);
3532 of_node_put(fw_node);
3533 if (!drvdata->fw)
3534 return -EPROBE_DEFER;
3535
3536 platform_set_drvdata(pdev, drvdata);
3537
3538 err = vchiq_platform_init(pdev, &g_state);
3539 if (err != 0)
3540 goto failed_platform_init;
3541
3542 cdev_init(&vchiq_cdev, &vchiq_fops);
3543 vchiq_cdev.owner = THIS_MODULE;
3544 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
3545 if (err != 0) {
3546 vchiq_log_error(vchiq_arm_log_level,
3547 "Unable to register device");
3548 goto failed_platform_init;
3549 }
3550
3551 vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3552 "vchiq");
3553 if (IS_ERR(vchiq_dev)) {
3554 err = PTR_ERR(vchiq_dev);
3555 goto failed_device_create;
3556 }
3557
3558 vchiq_debugfs_init();
3559
3560 vchiq_log_info(vchiq_arm_log_level,
3561 "vchiq: initialised - version %d (min %d), device %d.%d",
3562 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
3563 MAJOR(vchiq_devid), MINOR(vchiq_devid));
3564
3565 bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
3566 bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
3567
3568 return 0;
3569
3570 failed_device_create:
3571 cdev_del(&vchiq_cdev);
3572 failed_platform_init:
3573 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
3574 return err;
3575 }
3576
3577 static int vchiq_remove(struct platform_device *pdev)
3578 {
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
3580 vchiq_debugfs_deinit();
3581 device_destroy(vchiq_class, vchiq_devid);
3582 cdev_del(&vchiq_cdev);
3583
3584 return 0;
3585 }
3586
3587 static struct platform_driver vchiq_driver = {
3588 .driver = {
3589 .name = "bcm2835_vchiq",
3590 .of_match_table = vchiq_of_match,
3591 },
3592 .probe = vchiq_probe,
3593 .remove = vchiq_remove,
3594 };
3595
3596 static int __init vchiq_driver_init(void)
3597 {
3598 int ret;
3599
3600 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3601 if (IS_ERR(vchiq_class)) {
3602 pr_err("Failed to create vchiq class\n");
3603 return PTR_ERR(vchiq_class);
3604 }
3605
3606 ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
3607 if (ret) {
3608 pr_err("Failed to allocate vchiq's chrdev region\n");
3609 goto class_destroy;
3610 }
3611
3612 ret = platform_driver_register(&vchiq_driver);
3613 if (ret) {
3614 pr_err("Failed to register vchiq driver\n");
3615 goto region_unregister;
3616 }
3617
3618 return 0;
3619
3620 region_unregister:
3621 	unregister_chrdev_region(vchiq_devid, 1);
3622
3623 class_destroy:
3624 class_destroy(vchiq_class);
3625
3626 return ret;
3627 }
3628 module_init(vchiq_driver_init);
3629
3630 static void __exit vchiq_driver_exit(void)
3631 {
3632 platform_driver_unregister(&vchiq_driver);
3633 unregister_chrdev_region(vchiq_devid, 1);
3634 class_destroy(vchiq_class);
3635 }
3636 module_exit(vchiq_driver_exit);
3637
3638 MODULE_LICENSE("Dual BSD/GPL");
3639 MODULE_DESCRIPTION("Videocore VCHIQ driver");
3640 MODULE_AUTHOR("Broadcom Corporation");