/*
 ****************************************************************************
 * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available at
 * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 ****************************************************************************
 */

/* ---- Include Files ----------------------------------------------------- */

#include <linux/cdev.h>
#include <linux/broadcom/vc_mem.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pfn.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

#include "vchiq_connected.h"
#include "vc_vchi_sm.h"

#include <linux/broadcom/vmcs_sm_ioctl.h>
#include "vc_sm_knl.h"

/* ---- Private Constants and Types --------------------------------------- */

#define DEVICE_NAME "vcsm"
#define DRIVER_NAME "bcm2835-vcsm"
#define DEVICE_MINOR 0

#define VC_SM_DIR_ROOT_NAME "vc-smem"
#define VC_SM_DIR_ALLOC_NAME "alloc"
#define VC_SM_STATE "state"
#define VC_SM_STATS "statistics"
#define VC_SM_RESOURCES "resources"
#define VC_SM_DEBUG "debug"
#define VC_SM_WRITE_BUF_SIZE 128

/* Statistics tracked per resource and globally. */
enum sm_stats_t {
	/* Attempt. */
	ALLOC,
	FREE,
	LOCK,
	UNLOCK,
	MAP,
	FLUSH,
	INVALID,
	IMPORT,

	END_ATTEMPT,

	/* Failure. */
	ALLOC_FAIL,
	FREE_FAIL,
	LOCK_FAIL,
	UNLOCK_FAIL,
	MAP_FAIL,
	FLUSH_FAIL,
	INVALID_FAIL,
	IMPORT_FAIL,

	END_ALL,

};
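/*
 * Note: each failure counter sits END_ATTEMPT slots after its attempt
 * counter, so res_stats[ix + END_ATTEMPT] is the failure count matching
 * attempt counter res_stats[ix]; the show() helpers below rely on this
 * layout.
 */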

static const char *const sm_stats_human_read[] = {
	"Alloc",
	"Free",
	"Lock",
	"Unlock",
	"Map",
	"Cache Flush",
	"Cache Invalidate",
	"Import",
};

typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
struct sm_pde_t {
	VC_SM_SHOW show;	/* Debug fs function hookup. */
	struct dentry *dir_entry;	/* Debug fs directory entry. */
	void *priv_data;	/* Private data */

};

/* Single resource allocation tracked for all devices. */
struct sm_mmap {
	struct list_head map_list;	/* Linked list of maps. */

	struct sm_resource_t *resource;	/* Pointer to the resource. */

	pid_t res_pid;	/* PID owning that resource. */
	unsigned int res_vc_hdl;	/* Resource handle (videocore). */
	unsigned int res_usr_hdl;	/* Resource handle (user). */

	unsigned long res_addr;	/* Mapped virtual address. */
	struct vm_area_struct *vma;	/* VM area for this mapping. */
	unsigned int ref_count;	/* Reference count to this vma. */

	/* Used to link maps associated with a resource. */
	struct list_head resource_map_list;
};

/* Single resource allocation tracked for each opened device. */
struct sm_resource_t {
	struct list_head resource_list;	/* List of resources. */
	struct list_head global_resource_list;	/* Global list of resources. */

	pid_t pid;	/* PID owning that resource. */
	uint32_t res_guid;	/* Unique identifier. */
	uint32_t lock_count;	/* Lock count for this resource. */
	uint32_t ref_count;	/* Ref count for this resource. */

	uint32_t res_handle;	/* Resource allocation handle. */
	void *res_base_mem;	/* Resource base memory address. */
	uint32_t res_size;	/* Resource size allocated. */
	enum vmcs_sm_cache_e res_cached;	/* Resource cache type. */
	struct sm_resource_t *res_shared;	/* Shared resource */

	enum sm_stats_t res_stats[END_ALL];	/* Resource statistics. */

	uint8_t map_count;	/* Counter of mappings for this resource. */
	struct list_head map_list;	/* Maps associated with a resource. */

	/* DMABUF related fields */
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	dma_addr_t dma_addr;

	struct sm_priv_data_t *private;
	bool map;	/* whether to map pages up front */
};

/* Private file data associated with each opened device. */
struct sm_priv_data_t {
	struct list_head resource_list;	/* List of resources. */

	pid_t pid;	/* PID of creator. */

	struct dentry *dir_pid;	/* Debug fs entries root. */
	struct sm_pde_t dir_stats;	/* Debug fs entries statistics sub-tree. */
	struct sm_pde_t dir_res;	/* Debug fs resource sub-tree. */

	int restart_sys;	/* Tracks restart on interrupt. */
	enum vc_sm_msg_type int_action;	/* Interrupted action. */
	uint32_t int_trans_id;	/* Interrupted transaction. */

};

/* Global state information. */
struct sm_state_t {
	struct platform_device *pdev;
	struct sm_instance *sm_handle;	/* Handle for videocore service. */
	struct dentry *dir_root;	/* Debug fs entries root. */
	struct dentry *dir_alloc;	/* Debug fs entries allocations. */
	struct sm_pde_t dir_stats;	/* Debug fs entries statistics sub-tree. */
	struct sm_pde_t dir_state;	/* Debug fs entries state sub-tree. */
	struct dentry *debug;	/* Debug fs entries debug. */

	struct mutex map_lock;	/* Global map lock. */
	struct list_head map_list;	/* List of maps. */
	struct list_head resource_list;	/* List of resources. */

	enum sm_stats_t deceased[END_ALL];	/* Natural termination stats. */
	enum sm_stats_t terminated[END_ALL];	/* Forced termination stats. */
	uint32_t res_deceased_cnt;	/* Natural termination counter. */
	uint32_t res_terminated_cnt;	/* Forced termination counter. */

	struct cdev sm_cdev;	/* Device. */
	dev_t sm_devid;	/* Device identifier. */
	struct class *sm_class;	/* Class. */
	struct device *sm_dev;	/* Device. */

	struct sm_priv_data_t *data_knl;	/* Kernel internal data tracking. */

	struct mutex lock;	/* Global lock. */
	uint32_t guid;	/* GUID (next) tracker. */

};

/* ---- Private Variables ----------------------------------------------- */

static struct sm_state_t *sm_state;
static int sm_inited;

#if 0
static const char *const sm_cache_map_vector[] = {
	"(null)",
	"host",
	"videocore",
	"host+videocore",
};
#endif

/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Private Functions ------------------------------------------------ */

static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
{
	unsigned long pfn = vc_addr & 0x3FFFFFFF;

	pfn += mm_vc_mem_phys_addr;
	pfn >>= PAGE_SHIFT;
	return pfn;
}
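/*
 * Worked example (illustrative numbers only): a videocore bus address of
 * 0xDEAD1000 masked with 0x3FFFFFFF leaves the offset 0x1EAD1000; adding
 * mm_vc_mem_phys_addr (0x0 on a board whose videocore memory starts at
 * physical address zero) and shifting right by PAGE_SHIFT (12 for 4 KiB
 * pages) gives PFN 0x1EAD1.
 */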

/*
 * Carries over to the global state statistics the statistics once owned
 * by a deceased resource.
 */
static void vc_sm_resource_deceased(struct sm_resource_t *p_res, int terminated)
{
	if (sm_state != NULL) {
		if (p_res != NULL) {
			int ix;

			if (terminated)
				sm_state->res_terminated_cnt++;
			else
				sm_state->res_deceased_cnt++;

			for (ix = 0; ix < END_ALL; ix++) {
				if (terminated)
					sm_state->terminated[ix] +=
						p_res->res_stats[ix];
				else
					sm_state->deceased[ix] +=
						p_res->res_stats[ix];
			}
		}
	}
}

/*
 * Fetch a videocore handle corresponding to a mapping of the pid+address;
 * returns 0 (i.e. NULL) if no such handle exists in the global map.
 */
static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
							   unsigned int addr)
{
	struct sm_mmap *map = NULL;
	unsigned int handle = 0;

	if (!sm_state || addr == 0)
		goto out;

	mutex_lock(&(sm_state->map_lock));

	/* Lookup the resource. */
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			if (map->res_pid != pid)
				continue;
			if (addr < map->res_addr ||
			    addr >= (map->res_addr + map->resource->res_size))
				continue;

			pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
				 __func__, map, map->res_pid, map->res_addr,
				 map->res_vc_hdl, map->res_usr_hdl);

			handle = map->res_vc_hdl;
			break;
		}
	}

	mutex_unlock(&(sm_state->map_lock));

out:
	/*
	 * Use a debug log here, as it may be a valid situation that we query
	 * for something that is not mapped; we do not want a kernel log each
	 * time around.
	 *
	 * Other error logs would pop up accordingly if someone subsequently
	 * tries to use something invalid after being told not to use it...
	 */
	if (handle == 0) {
		pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
			 __func__, pid, addr);
	}

	return handle;
}

/*
 * Fetch a user handle corresponding to a mapping of the pid+address;
 * returns 0 (i.e. NULL) if no such handle exists in the global map.
 */
static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
							    unsigned int addr)
{
	struct sm_mmap *map = NULL;
	unsigned int handle = 0;

	if (!sm_state || addr == 0)
		goto out;

	mutex_lock(&(sm_state->map_lock));

	/* Lookup the resource. */
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			if (map->res_pid != pid)
				continue;
			if (addr < map->res_addr ||
			    addr >= (map->res_addr + map->resource->res_size))
				continue;

			pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
				 __func__, map, map->res_pid, map->res_addr,
				 map->res_usr_hdl, map->res_vc_hdl);

			handle = map->res_usr_hdl;
			break;
		}
	}

	mutex_unlock(&(sm_state->map_lock));

out:
	/*
	 * Use a debug log here, as it may be a valid situation that we query
	 * for something that is not mapped yet.
	 *
	 * Other error logs would pop up accordingly if someone subsequently
	 * tries to use something invalid after being told not to use it...
	 */
	if (handle == 0)
		pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
			 __func__, pid, addr);

	return handle;
}

#if defined(DO_NOT_USE)
/*
 * Fetch an address corresponding to a mapping of the pid+handle;
 * returns 0 (i.e. NULL) if no such address exists in the global map.
 */
static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
							       unsigned int hdl)
{
	struct sm_mmap *map = NULL;
	unsigned int addr = 0;

	if (sm_state == NULL || hdl == 0)
		goto out;

	mutex_lock(&(sm_state->map_lock));

	/* Lookup the resource. */
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			if (map->res_pid != pid || map->res_vc_hdl != hdl)
				continue;

			pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
				 __func__, map, map->res_pid, map->res_vc_hdl,
				 map->res_usr_hdl, map->res_addr);

			addr = map->res_addr;
			break;
		}
	}

	mutex_unlock(&(sm_state->map_lock));

out:
	/*
	 * Use a debug log here, as it may be a valid situation that we query
	 * for something that is not mapped; we do not want a kernel log each
	 * time around.
	 *
	 * Other error logs would pop up accordingly if someone subsequently
	 * tries to use something invalid after being told not to use it...
	 */
	if (addr == 0)
		pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
			 __func__, pid, hdl);

	return addr;
}
#endif

/*
 * Fetch an address corresponding to a mapping of the pid+handle;
 * returns 0 (i.e. NULL) if no such address exists in the global map.
 */
static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
								pid,
								unsigned int
								hdl)
{
	struct sm_mmap *map = NULL;
	unsigned int addr = 0;

	if (sm_state == NULL || hdl == 0)
		goto out;

	mutex_lock(&(sm_state->map_lock));

	/* Lookup the resource. */
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			if (map->res_pid != pid || map->res_usr_hdl != hdl)
				continue;

			pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
				 __func__, map, map->res_pid, map->res_vc_hdl,
				 map->res_usr_hdl, map->res_addr);

			addr = map->res_addr;
			break;
		}
	}

	mutex_unlock(&(sm_state->map_lock));

out:
	/*
	 * Use a debug log here, as it may be a valid situation that we query
	 * for something that is not mapped; we do not want a kernel log each
	 * time around.
	 *
	 * Other error logs would pop up accordingly if someone subsequently
	 * tries to use something invalid after being told not to use it...
	 */
	if (addr == 0)
		pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
			 pid, hdl);

	return addr;
}

/* Adds a resource mapping to the global data list. */
static void vmcs_sm_add_map(struct sm_state_t *state,
			    struct sm_resource_t *resource, struct sm_mmap *map)
{
	mutex_lock(&(state->map_lock));

	/* Add to the global list of mappings */
	list_add(&map->map_list, &state->map_list);

	/* Add to the list of mappings for this resource */
	list_add(&map->resource_map_list, &resource->map_list);
	resource->map_count++;

	mutex_unlock(&(state->map_lock));

	pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
		 __func__, map, map->res_pid, map->res_vc_hdl,
		 map->res_usr_hdl, map->res_addr);
}

/* Removes a resource mapping from the global data list. */
static void vmcs_sm_remove_map(struct sm_state_t *state,
			       struct sm_resource_t *resource,
			       struct sm_mmap *map)
{
	mutex_lock(&(state->map_lock));

	/* Remove from the global list of mappings */
	list_del(&map->map_list);

	/* Remove from the list of mappings for this resource */
	list_del(&map->resource_map_list);
	if (resource->map_count > 0)
		resource->map_count--;

	mutex_unlock(&(state->map_lock));

	pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
		 __func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
		 map->res_addr);

	kfree(map);
}

/* Read callback for the global state debugfs entry. */
static int vc_sm_global_state_show(struct seq_file *s, void *v)
{
	struct sm_mmap *map = NULL;
	struct sm_resource_t *resource = NULL;
	int map_count = 0;
	int resource_count = 0;

	if (sm_state == NULL)
		return 0;

	seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
		   (unsigned int)sm_state->sm_handle);

	/* Log all applicable mapping(s). */

	mutex_lock(&(sm_state->map_lock));
	seq_puts(s, "\nResources\n");
	if (!list_empty(&sm_state->resource_list)) {
		list_for_each_entry(resource, &sm_state->resource_list,
				    global_resource_list) {
			resource_count++;

			seq_printf(s, "\nResource %p\n",
				   resource);
			seq_printf(s, " PID %u\n",
				   resource->pid);
			seq_printf(s, " RES_GUID 0x%x\n",
				   resource->res_guid);
			seq_printf(s, " LOCK_COUNT %u\n",
				   resource->lock_count);
			seq_printf(s, " REF_COUNT %u\n",
				   resource->ref_count);
			seq_printf(s, " res_handle 0x%X\n",
				   resource->res_handle);
			seq_printf(s, " res_base_mem %p\n",
				   resource->res_base_mem);
			seq_printf(s, " SIZE %d\n",
				   resource->res_size);
			seq_printf(s, " DMABUF %p\n",
				   resource->dma_buf);
			seq_printf(s, " ATTACH %p\n",
				   resource->attach);
			seq_printf(s, " SGT %p\n",
				   resource->sgt);
			seq_printf(s, " DMA_ADDR %pad\n",
				   &resource->dma_addr);
		}
	}
	seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);

	seq_puts(s, "\nMappings\n");
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			map_count++;

			seq_printf(s, "\nMapping 0x%x\n",
				   (unsigned int)map);
			seq_printf(s, " TGID %u\n",
				   map->res_pid);
			seq_printf(s, " VC-HDL 0x%x\n",
				   map->res_vc_hdl);
			seq_printf(s, " USR-HDL 0x%x\n",
				   map->res_usr_hdl);
			seq_printf(s, " USR-ADDR 0x%lx\n",
				   map->res_addr);
			seq_printf(s, " SIZE %d\n",
				   map->resource->res_size);
		}
	}

	mutex_unlock(&(sm_state->map_lock));
	seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);

	return 0;
}

static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
{
	int ix;

	/* Global state tracked statistics. */
	if (sm_state != NULL) {
		seq_puts(s, "\nDeceased Resources Statistics\n");

		seq_printf(s, "\nNatural Cause (%u occurrences)\n",
			   sm_state->res_deceased_cnt);
		for (ix = 0; ix < END_ATTEMPT; ix++) {
			if (sm_state->deceased[ix] > 0) {
				seq_printf(s, " %u\t%s\n",
					   sm_state->deceased[ix],
					   sm_stats_human_read[ix]);
			}
		}
		seq_puts(s, "\n");
		for (ix = 0; ix < END_ATTEMPT; ix++) {
			if (sm_state->deceased[ix + END_ATTEMPT] > 0) {
				seq_printf(s, " %u\tFAILED %s\n",
					   sm_state->deceased[ix + END_ATTEMPT],
					   sm_stats_human_read[ix]);
			}
		}

		seq_printf(s, "\nForceful (%u occurrences)\n",
			   sm_state->res_terminated_cnt);
		for (ix = 0; ix < END_ATTEMPT; ix++) {
			if (sm_state->terminated[ix] > 0) {
				seq_printf(s, " %u\t%s\n",
					   sm_state->terminated[ix],
					   sm_stats_human_read[ix]);
			}
		}
		seq_puts(s, "\n");
		for (ix = 0; ix < END_ATTEMPT; ix++) {
			if (sm_state->terminated[ix + END_ATTEMPT] > 0) {
				seq_printf(s, " %u\tFAILED %s\n",
					   sm_state->terminated[ix +
							       END_ATTEMPT],
					   sm_stats_human_read[ix]);
			}
		}
	}

	return 0;
}

#if 0
/* Read callback for the statistics debugfs entry. */
static int vc_sm_statistics_show(struct seq_file *s, void *v)
{
	int ix;
	struct sm_priv_data_t *file_data;
	struct sm_resource_t *resource;
	int res_count = 0;
	struct sm_pde_t *p_pde;

	p_pde = (struct sm_pde_t *)(s->private);
	file_data = (struct sm_priv_data_t *)(p_pde->priv_data);

	if (file_data == NULL)
		return 0;

	/* Per process statistics. */

	seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);

	mutex_lock(&(sm_state->map_lock));

	if (!list_empty(&file_data->resource_list)) {
		list_for_each_entry(resource, &file_data->resource_list,
				    resource_list) {
			res_count++;

			seq_printf(s, "\nGUID: 0x%x\n\n",
				   resource->res_guid);
			for (ix = 0; ix < END_ATTEMPT; ix++) {
				if (resource->res_stats[ix] > 0) {
					seq_printf(s,
						   " %u\t%s\n",
						   resource->res_stats[ix],
						   sm_stats_human_read[ix]);
				}
			}
			seq_puts(s, "\n");
			for (ix = 0; ix < END_ATTEMPT; ix++) {
				if (resource->res_stats[ix + END_ATTEMPT] > 0) {
					seq_printf(s,
						   " %u\tFAILED %s\n",
						   resource->res_stats[
						   ix + END_ATTEMPT],
						   sm_stats_human_read[ix]);
				}
			}
		}
	}

	mutex_unlock(&(sm_state->map_lock));

	seq_printf(s, "\nResources Count %d\n", res_count);

	return 0;
}
#endif

#if 0
/* Read callback for the allocation debugfs entry. */
static int vc_sm_alloc_show(struct seq_file *s, void *v)
{
	struct sm_priv_data_t *file_data;
	struct sm_resource_t *resource;
	int alloc_count = 0;
	struct sm_pde_t *p_pde;

	p_pde = (struct sm_pde_t *)(s->private);
	file_data = (struct sm_priv_data_t *)(p_pde->priv_data);

	if (!file_data)
		return 0;

	/* Per process statistics. */
	seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);

	mutex_lock(&(sm_state->map_lock));

	if (!list_empty(&file_data->resource_list)) {
		list_for_each_entry(resource, &file_data->resource_list,
				    resource_list) {
			alloc_count++;

			seq_printf(s, "\nGUID: 0x%x\n",
				   resource->res_guid);
			seq_printf(s, "Lock Count: %u\n",
				   resource->lock_count);
			seq_printf(s, "Mapped: %s\n",
				   (resource->map_count ? "yes" : "no"));
			seq_printf(s, "VC-handle: 0x%x\n",
				   resource->res_handle);
			seq_printf(s, "VC-address: 0x%p\n",
				   resource->res_base_mem);
			seq_printf(s, "VC-size (bytes): %u\n",
				   resource->res_size);
			seq_printf(s, "Cache: %s\n",
				   sm_cache_map_vector[resource->res_cached]);
		}
	}

	mutex_unlock(&(sm_state->map_lock));

	seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);

	return 0;
}
#endif

static int vc_sm_seq_file_show(struct seq_file *s, void *v)
{
	struct sm_pde_t *sm_pde;

	sm_pde = (struct sm_pde_t *)(s->private);

	if (sm_pde && sm_pde->show)
		sm_pde->show(s, v);

	return 0;
}

static int vc_sm_single_open(struct inode *inode, struct file *file)
{
	return single_open(file, vc_sm_seq_file_show, inode->i_private);
}

static const struct file_operations vc_sm_debug_fs_fops = {
	.open = vc_sm_single_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Adds a resource to the private data list which tracks all the allocated
 * data.
 */
static void vmcs_sm_add_resource(struct sm_priv_data_t *privdata,
				 struct sm_resource_t *resource)
{
	mutex_lock(&(sm_state->map_lock));
	list_add(&resource->resource_list, &privdata->resource_list);
	list_add(&resource->global_resource_list, &sm_state->resource_list);
	mutex_unlock(&(sm_state->map_lock));

	pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
		 __func__, resource, resource->res_base_mem,
		 resource->res_handle, resource->res_size, resource->res_cached);
}

/*
 * Locates a resource by GUID in the private list and acquires a reference
 * on it. The resource won't be deleted while there is a reference on it.
 */
static struct sm_resource_t *vmcs_sm_acquire_resource(struct sm_priv_data_t
						      *private,
						      unsigned int res_guid)
{
	struct sm_resource_t *resource, *ret = NULL;

	mutex_lock(&(sm_state->map_lock));

	list_for_each_entry(resource, &private->resource_list, resource_list) {
		if (resource->res_guid != res_guid)
			continue;

		pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
			 __func__, resource, resource->res_guid,
			 resource->res_base_mem, resource->res_handle,
			 resource->res_size, resource->res_cached);
		resource->ref_count++;
		ret = resource;
		break;
	}

	mutex_unlock(&(sm_state->map_lock));

	return ret;
}

/*
 * Locates the first resource in the private list and acquires a reference
 * on it. The resource won't be deleted while there is a reference on it.
 */
static struct sm_resource_t *vmcs_sm_acquire_first_resource(
		struct sm_priv_data_t *private)
{
	struct sm_resource_t *resource, *ret = NULL;

	mutex_lock(&(sm_state->map_lock));

	list_for_each_entry(resource, &private->resource_list, resource_list) {
		pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
			 __func__, resource, resource->res_guid,
			 resource->res_base_mem, resource->res_handle,
			 resource->res_size, resource->res_cached);
		resource->ref_count++;
		ret = resource;
		break;
	}

	mutex_unlock(&(sm_state->map_lock));

	return ret;
}

/*
 * Locates a resource by GUID in the global list and acquires a reference
 * on it. The resource won't be deleted while there is a reference on it.
 */
static struct sm_resource_t *vmcs_sm_acquire_global_resource(unsigned int
							     res_guid)
{
	struct sm_resource_t *resource, *ret = NULL;

	mutex_lock(&(sm_state->map_lock));

	list_for_each_entry(resource, &sm_state->resource_list,
			    global_resource_list) {
		if (resource->res_guid != res_guid)
			continue;

		pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
			 __func__, resource, resource->res_guid,
			 resource->res_base_mem, resource->res_handle,
			 resource->res_size, resource->res_cached);
		resource->ref_count++;
		ret = resource;
		break;
	}

	mutex_unlock(&(sm_state->map_lock));

	return ret;
}

/*
 * Releases a previously acquired resource.
 * The resource will be deleted when its refcount reaches 0.
 */
static void vmcs_sm_release_resource(struct sm_resource_t *resource, int force)
{
	struct sm_priv_data_t *private = resource->private;
	struct sm_mmap *map, *map_tmp;
	struct sm_resource_t *res_tmp;
	int ret;

	mutex_lock(&(sm_state->map_lock));

	if (--resource->ref_count) {
		if (force)
			pr_err("[%s]: resource %p in use\n", __func__, resource);

		mutex_unlock(&(sm_state->map_lock));
		return;
	}

	/* Time to free the resource. Start by removing it from the list */
	list_del(&resource->resource_list);
	list_del(&resource->global_resource_list);

	/*
	 * Walk the global resource list to find out if the underlying
	 * allocation is still used somewhere else; if so, clear our handle
	 * so we don't free the videocore memory out from under that user.
	 */
	list_for_each_entry(res_tmp, &sm_state->resource_list,
			    global_resource_list) {
		if (res_tmp->res_handle == resource->res_handle) {
			resource->res_handle = 0;
			break;
		}
	}

	mutex_unlock(&(sm_state->map_lock));

	pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
		 __func__, resource->res_guid, resource->res_handle,
		 resource->res_base_mem);
	resource->res_stats[FREE]++;

	/* Make sure the resource we're removing is unmapped first */
	if (resource->map_count && !list_empty(&resource->map_list)) {
		down_write(&current->mm->mmap_sem);
		list_for_each_entry_safe(map, map_tmp, &resource->map_list,
					 resource_map_list) {
			ret = do_munmap(current->mm, map->res_addr,
					resource->res_size, NULL);
			if (ret) {
				pr_err("[%s]: could not unmap resource %p\n",
				       __func__, resource);
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	/* Free up the videocore allocated resource. */
	if (resource->res_handle) {
		struct vc_sm_free_t free = {
			resource->res_handle, (uint32_t)resource->res_base_mem
		};
		int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
					     &private->int_trans_id);
		if (status != 0 && status != -EINTR) {
			pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
			       __func__, status, private->int_trans_id);
			resource->res_stats[FREE_FAIL]++;
			ret = -EPERM;
		}
	}

	if (resource->sgt)
		dma_buf_unmap_attachment(resource->attach, resource->sgt,
					 DMA_BIDIRECTIONAL);
	if (resource->attach)
		dma_buf_detach(resource->dma_buf, resource->attach);
	if (resource->dma_buf)
		dma_buf_put(resource->dma_buf);

	/* Free up the shared resource. */
	if (resource->res_shared)
		vmcs_sm_release_resource(resource->res_shared, 0);

	/* Free up the local resource tracking this allocation. */
	vc_sm_resource_deceased(resource, force);
	kfree(resource);
}

/*
 * Dump the map table for the driver. If process is -1, dump the whole
 * table; if process is a valid pid (non -1), dump only the entries
 * associated with the pid of interest.
 */
static void vmcs_sm_host_walk_map_per_pid(int pid)
{
	struct sm_mmap *map = NULL;

	/* Make sure the device was started properly. */
	if (sm_state == NULL) {
		pr_err("[%s]: invalid device\n", __func__);
		return;
	}

	mutex_lock(&(sm_state->map_lock));

	/* Log all applicable mapping(s). */
	if (!list_empty(&sm_state->map_list)) {
		list_for_each_entry(map, &sm_state->map_list, map_list) {
			if (pid == -1 || map->res_pid == pid) {
				pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
					__func__, map->res_pid, map->res_vc_hdl,
					map->res_usr_hdl, map->res_addr);
			}
		}
	}

	mutex_unlock(&(sm_state->map_lock));
}

/*
 * Dump the allocation table from the host's point of view. This only dumps
 * the data allocated for this process/device referenced by the file_data.
 */
static void vmcs_sm_host_walk_alloc(struct sm_priv_data_t *file_data)
{
	struct sm_resource_t *resource = NULL;

	/* Make sure the device was started properly. */
	if ((sm_state == NULL) || (file_data == NULL)) {
		pr_err("[%s]: invalid device\n", __func__);
		return;
	}

	mutex_lock(&(sm_state->map_lock));

	if (!list_empty(&file_data->resource_list)) {
		list_for_each_entry(resource, &file_data->resource_list,
				    resource_list) {
			pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
				__func__, resource->res_guid, resource->res_handle,
				resource->res_base_mem, resource->res_size,
				resource->res_cached);
		}
	}

	mutex_unlock(&(sm_state->map_lock));
}

/* Create support for private data tracking. */
static struct sm_priv_data_t *vc_sm_create_priv_data(pid_t id)
{
	char alloc_name[32];
	struct sm_priv_data_t *file_data = NULL;

	/* Allocate private structure. */
	file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);

	if (!file_data) {
		pr_err("[%s]: cannot allocate file data\n", __func__);
		goto out;
	}

	snprintf(alloc_name, sizeof(alloc_name), "%d", id);

	INIT_LIST_HEAD(&file_data->resource_list);
	file_data->pid = id;
	file_data->dir_pid = debugfs_create_dir(alloc_name,
						sm_state->dir_alloc);
#if 0
	/* TODO: fix this to support querying statistics per pid */

	if (IS_ERR_OR_NULL(file_data->dir_pid)) {
		file_data->dir_pid = NULL;
	} else {
		struct dentry *dir_entry;

		dir_entry = debugfs_create_file(VC_SM_RESOURCES, 0444,
						file_data->dir_pid, file_data,
						&vc_sm_debug_fs_fops);

		file_data->dir_res.dir_entry = dir_entry;
		file_data->dir_res.priv_data = file_data;
		file_data->dir_res.show = &vc_sm_alloc_show;

		dir_entry = debugfs_create_file(VC_SM_STATS, 0444,
						file_data->dir_pid, file_data,
						&vc_sm_debug_fs_fops);

		file_data->dir_res.dir_entry = dir_entry;
		file_data->dir_res.priv_data = file_data;
		file_data->dir_res.show = &vc_sm_statistics_show;
	}
	pr_debug("[%s]: private data allocated %p\n", __func__, file_data);

#endif
out:
	return file_data;
}

/*
 * Open the device. Creates a private state to help track all allocations
 * associated with this device.
 */
static int vc_sm_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	/* Make sure the device was started properly. */
	if (!sm_state) {
		pr_err("[%s]: invalid device\n", __func__);
		ret = -EPERM;
		goto out;
	}

	file->private_data = vc_sm_create_priv_data(current->tgid);
	if (file->private_data == NULL) {
		pr_err("[%s]: failed to create data tracker\n", __func__);

		ret = -ENOMEM;
		goto out;
	}

out:
	return ret;
}

/*
 * Close the device. Free up all resources still associated with this device
 * at that time.
 */
static int vc_sm_release(struct inode *inode, struct file *file)
{
	struct sm_priv_data_t *file_data =
		(struct sm_priv_data_t *)file->private_data;
	struct sm_resource_t *resource;
	int ret = 0;

	/* Make sure the device was started properly. */
	if (sm_state == NULL || file_data == NULL) {
		pr_err("[%s]: invalid device\n", __func__);
		ret = -EPERM;
		goto out;
	}

	pr_debug("[%s]: using private data %p\n", __func__, file_data);

	if (file_data->restart_sys == -EINTR) {
		struct vc_sm_action_clean_t action_clean;

		pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
			 __func__, file_data->int_action,
			 file_data->int_trans_id);

		action_clean.res_action = file_data->int_action;
		action_clean.action_trans_id = file_data->int_trans_id;

		vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
	}

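	/*
	 * Each loop iteration takes a fresh reference on the resource; the
	 * first release drops that reference, and the second, forced release
	 * drops the allocation's own reference and logs an error if the
	 * resource is still referenced elsewhere.
	 */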
	while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
		vmcs_sm_release_resource(resource, 0);
		vmcs_sm_release_resource(resource, 1);
	}

	/* Remove the corresponding debugfs entries. */
	debugfs_remove_recursive(file_data->dir_pid);

	/* Terminate the private data. */
	kfree(file_data);

out:
	return ret;
}

static void vcsm_vma_open(struct vm_area_struct *vma)
{
	struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;

	pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
		 __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
		 (int)vma->vm_pgoff);

	map->ref_count++;
}

static void vcsm_vma_close(struct vm_area_struct *vma)
{
	struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;

	pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
		 __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
		 (int)vma->vm_pgoff);

	map->ref_count--;

	/* Remove from the map table. */
	if (map->ref_count == 0)
		vmcs_sm_remove_map(sm_state, map->resource, map);
}

static int vcsm_vma_fault(struct vm_fault *vmf)
{
	struct sm_mmap *map = (struct sm_mmap *)vmf->vma->vm_private_data;
	struct sm_resource_t *resource = map->resource;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;

	/* Lock the resource if necessary. */
	if (!resource->lock_count) {
		struct vc_sm_lock_unlock_t lock_unlock;
		struct vc_sm_lock_result_t lock_result;
		int status;

		lock_unlock.res_handle = resource->res_handle;
		lock_unlock.res_mem = (uint32_t)resource->res_base_mem;

		pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
			 __func__, lock_unlock.res_handle,
			 (void *)lock_unlock.res_mem);

		/* Lock the videocore allocated resource. */
		status = vc_vchi_sm_lock(sm_state->sm_handle,
					 &lock_unlock, &lock_result, 0);
		if (status || !lock_result.res_mem) {
			pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
			       __func__, status);
			resource->res_stats[LOCK_FAIL]++;
			return VM_FAULT_SIGBUS;
		}

		pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
		outer_inv_range(__pfn_to_phys(pfn),
				__pfn_to_phys(pfn) + resource->res_size);

		resource->res_stats[LOCK]++;
		resource->lock_count++;

		/* Keep track of the new base memory. */
		if (lock_result.res_mem &&
		    lock_result.res_old_mem &&
		    (lock_result.res_mem != lock_result.res_old_mem)) {
			resource->res_base_mem = (void *)lock_result.res_mem;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->address - vmf->vma->vm_start);
	pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
	pfn += mm_vc_mem_phys_addr;
	pfn += page_offset;
	pfn >>= PAGE_SHIFT;

	/* Finally, remap it */
	ret = vm_insert_pfn(vmf->vma, (unsigned long)vmf->address, pfn);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	/*
	 * EBUSY is ok: this just means that another thread
	 * already did the job.
	 */
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
		       pfn, (unsigned long)vmf->address, ret);
		return VM_FAULT_OOM;
	default:
		pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
		       pfn, (unsigned long)vmf->address, ret);
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct vcsm_vm_ops = {
	.open = vcsm_vma_open,
	.close = vcsm_vma_close,
	.fault = vcsm_vma_fault,
};

/* Converts a VCSM_CACHE_OP_* constant to the matching cache-maintenance function. */
static void (*cache_op_to_func(const unsigned cache_op))
	(const void*, const void*)
{
	switch (cache_op) {
	case VCSM_CACHE_OP_NOP:
		return NULL;

	case VCSM_CACHE_OP_INV:
		return dmac_inv_range;

	case VCSM_CACHE_OP_CLEAN:
		return dmac_clean_range;

	case VCSM_CACHE_OP_FLUSH:
		return dmac_flush_range;

	default:
		pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
		return NULL;
	}
}
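/*
 * For instance, cache_op_to_func(VCSM_CACHE_OP_FLUSH) returns
 * dmac_flush_range(), which cleans and then invalidates the given range.
 */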

/*
 * Clean/invalidate/flush the cache for a buffer that is already pinned
 * (i.e. has been accessed, so its pages are resident).
 */
static int clean_invalid_contiguous_mem_2d(const void __user *addr,
	const size_t block_count, const size_t block_size, const size_t stride,
	const unsigned cache_op)
{
	size_t i;
	void (*op_fn)(const void*, const void*);

	if (!block_size) {
		pr_err("[%s]: size cannot be 0\n", __func__);
		return -EINVAL;
	}

	op_fn = cache_op_to_func(cache_op);
	if (op_fn == NULL)
		return -EINVAL;

	for (i = 0; i < block_count; i++, addr += stride)
		op_fn(addr, addr + block_size);

	return 0;
}
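/*
 * Illustrative call (not taken from the driver): cleaning 16 rows of
 * 256 bytes spaced 1024 bytes apart within an already-pinned buffer
 * would be
 *
 *	clean_invalid_contiguous_mem_2d(base, 16, 256, 1024,
 *					VCSM_CACHE_OP_CLEAN);
 *
 * which applies dmac_clean_range() to each 256-byte block and skips the
 * padding between rows.
 */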

/* Clean/invalidate/flush the cache for a buffer whose pages may not be pinned. */
/* The caller must lock current->mm->mmap_sem for read. */
static int clean_invalid_mem_walk(unsigned long addr, const size_t size,
	const unsigned cache_op)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pgd_next, pud_next, pmd_next;
	const unsigned long end = ALIGN(addr + size, PAGE_SIZE);
	void (*op_fn)(const void*, const void*);

	addr &= PAGE_MASK;

	if (addr >= end)
		return 0;

	op_fn = cache_op_to_func(cache_op);
	if (op_fn == NULL)
		return -EINVAL;

	/* Walk PGD */
	pgd = pgd_offset(current->mm, addr);
	do {
		pgd_next = pgd_addr_end(addr, end);

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			continue;

		/* Walk PUD */
		pud = pud_offset(pgd, addr);
		do {
			pud_next = pud_addr_end(addr, pgd_next);
			if (pud_none(*pud) || pud_bad(*pud))
				continue;

			/* Walk PMD */
			pmd = pmd_offset(pud, addr);
			do {
				pmd_next = pmd_addr_end(addr, pud_next);
				if (pmd_none(*pmd) || pmd_bad(*pmd))
					continue;

				/* Walk PTE */
				pte = pte_offset_map(pmd, addr);
				do {
					if (pte_none(*pte) || !pte_present(*pte))
						continue;

					op_fn((const void __user *) addr,
					      (const void __user *) (addr + PAGE_SIZE));
				} while (pte++, addr += PAGE_SIZE, addr != pmd_next);
				pte_unmap(pte);

			} while (pmd++, addr = pmd_next, addr != pud_next);

		} while (pud++, addr = pud_next, addr != pgd_next);

	} while (pgd++, addr = pgd_next, addr != end);

	return 0;
}
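/*
 * The table walk lets the driver apply the cache operation only to pages
 * that are actually present, instead of touching (and faulting in) every
 * page of the range. The expected calling pattern, mirrored by
 * clean_invalid_resource_walk() below, is:
 *
 *	down_read(&current->mm->mmap_sem);
 *	err = clean_invalid_mem_walk(addr, size, cache_op);
 *	up_read(&current->mm->mmap_sem);
 */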

/* Clean/invalidate/flush the cache for a buffer belonging to a resource. */
static int clean_invalid_resource_walk(const void __user *addr,
	const size_t size, const unsigned cache_op, const int usr_hdl,
	struct sm_resource_t *resource)
{
	int err;
	enum sm_stats_t stat_attempt, stat_failure;
	void __user *res_addr;

	if (resource == NULL) {
		pr_err("[%s]: resource is NULL\n", __func__);
		return -EINVAL;
	}
	if (resource->res_cached != VMCS_SM_CACHE_HOST &&
	    resource->res_cached != VMCS_SM_CACHE_BOTH)
		return 0;

	switch (cache_op) {
	case VCSM_CACHE_OP_NOP:
		return 0;
	case VCSM_CACHE_OP_INV:
		stat_attempt = INVALID;
		stat_failure = INVALID_FAIL;
		break;
	case VCSM_CACHE_OP_CLEAN:
		/* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
		stat_attempt = FLUSH;
		stat_failure = FLUSH_FAIL;
		break;
	case VCSM_CACHE_OP_FLUSH:
		stat_attempt = FLUSH;
		stat_failure = FLUSH_FAIL;
		break;
	default:
		pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
		return -EINVAL;
	}
	resource->res_stats[stat_attempt]++;

	if (size > resource->res_size) {
		pr_err("[%s]: size (0x%zx) is larger than res_size (0x%x)\n",
		       __func__, size, resource->res_size);
		return -EFAULT;
	}
	res_addr = (void __user *) vmcs_sm_usr_address_from_pid_and_usr_handle(
			current->tgid, usr_hdl);
	if (res_addr == NULL) {
		pr_err("[%s]: failed to get user address from pid (%d) and user handle (%d)\n",
		       __func__, current->tgid, usr_hdl);
		return -EINVAL;
	}
	if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
		pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
		       __func__, addr, addr + size, res_addr,
		       res_addr + resource->res_size);
		return -EFAULT;
	}

	down_read(&current->mm->mmap_sem);
	err = clean_invalid_mem_walk((unsigned long) addr, size, cache_op);
	up_read(&current->mm->mmap_sem);

	if (err)
		resource->res_stats[stat_failure]++;

	return err;
}

/* Map allocated data into user space. */
static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0;
	struct sm_priv_data_t *file_data =
		(struct sm_priv_data_t *)file->private_data;
	struct sm_resource_t *resource = NULL;
	struct sm_mmap *map = NULL;

	/* Make sure the device was started properly. */
	if ((sm_state == NULL) || (file_data == NULL)) {
		pr_err("[%s]: invalid device\n", __func__);
		return -EPERM;
	}

	pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
		 ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));

	/*
	 * We lookup to make sure that the data we are being asked to mmap is
	 * something that we allocated.
	 *
	 * We use the offset information as the key to tell us which resource
	 * we are mapping.
	 */
	resource = vmcs_sm_acquire_resource(file_data,
					    ((unsigned int)vma->vm_pgoff <<
					     PAGE_SHIFT));
	if (resource == NULL) {
		pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
		       ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
		return -ENOMEM;
	}

	pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
		 __func__, resource->res_guid, current->tgid, resource->pid,
		 file_data->pid);

	/* Check permissions. */
	if (resource->pid && (resource->pid != current->tgid)) {
		pr_err("[%s]: current tgid %u != %u owner\n",
		       __func__, current->tgid, resource->pid);
		ret = -EPERM;
		goto error;
	}

	/* Verify that what we are asked to mmap is proper. */
	if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
		pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
		       __func__,
		       resource->res_size,
		       (unsigned int)(vma->vm_end - vma->vm_start));

		ret = -EINVAL;
		goto error;
	}

	/*
	 * Keep track of the tuple in the global resource list such that one
	 * can do a mapping lookup for address/memory handle.
	 */
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		pr_err("[%s]: failed to allocate global tracking resource\n",
		       __func__);
		ret = -ENOMEM;
		goto error;
	}

	map->res_pid = current->tgid;
	map->res_vc_hdl = resource->res_handle;
	map->res_usr_hdl = resource->res_guid;
	map->res_addr = (unsigned long)vma->vm_start;
	map->resource = resource;
	map->vma = vma;
	vmcs_sm_add_map(sm_state, resource, map);

	/*
	 * We are not actually mapping the pages, we just provide a fault
	 * handler to allow pages to be mapped when accessed
	 */
	vma->vm_flags |=
		VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
	vma->vm_ops = &vcsm_vm_ops;
	vma->vm_private_data = map;

	/* vm_pgoff is the first PFN of the mapped memory */
	vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
	vma->vm_pgoff += mm_vc_mem_phys_addr;
	vma->vm_pgoff >>= PAGE_SHIFT;

	if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
	    (resource->res_cached == VMCS_SM_CACHE_VC)) {
		/* Allocated non host cached memory, honour it. */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	}

	pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
		 __func__,
		 resource, resource->res_guid, resource->lock_count,
		 resource->res_base_mem, resource->res_handle,
		 resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
		 resource->res_cached);

	pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
		 __func__, resource, resource->res_base_mem,
		 resource->res_handle, resource->map_count,
		 (unsigned int)vma->vm_start);

	vcsm_vma_open(vma);
	resource->res_stats[MAP]++;
	vmcs_sm_release_resource(resource, 0);

	if (resource->map) {
		/* We don't use vmf->pgoff since that has the fake offset */
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
			/* Finally, remap it */
			unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;

			pfn += mm_vc_mem_phys_addr;
			pfn += addr - vma->vm_start;
			pfn >>= PAGE_SHIFT;
			ret = vm_insert_pfn(vma, addr, pfn);
		}
	}

	return 0;

error:
	resource->res_stats[MAP_FAIL]++;
	vmcs_sm_release_resource(resource, 0);
	return ret;
}

/* Allocate a shared memory handle and block. */
int vc_sm_ioctl_alloc(struct sm_priv_data_t *private,
		      struct vmcs_sm_ioctl_alloc *ioparam)
{
	int ret = 0;
	int status;
	struct sm_resource_t *resource;
	struct vc_sm_alloc_t alloc = { 0 };
	struct vc_sm_alloc_result_t result = { 0 };
	enum vmcs_sm_cache_e cached = ioparam->cached;
	bool map = false;

	/* Flag requesting that the buffer be mapped up front rather than lazily. */
	if (cached & 0x80) {
		map = true;
		cached &= ~0x80;
	}

	/* Setup our allocation parameters */
	alloc.type = ((cached == VMCS_SM_CACHE_VC)
		      || (cached ==
			  VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
		      VC_SM_ALLOC_NON_CACHED;
	alloc.base_unit = ioparam->size;
	alloc.num_unit = ioparam->num;
	alloc.allocator = current->tgid;
	/* Align to kernel page size */
	alloc.alignement = 4096;
	/* Align the size to the kernel page size */
	alloc.base_unit =
		(alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
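	/* e.g. a requested size of 4097 bytes rounds up to 8192 (two 4 KiB pages). */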
	if (*ioparam->name) {
		memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
	} else {
		memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
		       sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
	}

	pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignment %u\n",
		 __func__, alloc.name, alloc.type, ioparam->size,
		 alloc.base_unit, alloc.num_unit, alloc.alignement);

	/* Allocate local resource to track this allocation. */
	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		ret = -ENOMEM;
		goto error;
	}
	INIT_LIST_HEAD(&resource->map_list);
	resource->ref_count++;
	resource->pid = current->tgid;

	/* Allocate the videocore resource. */
	status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
				  &private->int_trans_id);
	if (status == -EINTR) {
		pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
			 __func__, private->int_trans_id);
		ret = -ERESTARTSYS;
		private->restart_sys = -EINTR;
		private->int_action = VC_SM_MSG_TYPE_ALLOC;
		goto error;
	} else if (status != 0 || !result.res_mem) {
		pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
		       __func__, status, private->int_trans_id);
		ret = -ENOMEM;
		resource->res_stats[ALLOC_FAIL]++;
		goto error;
	}

	/* Keep track of the resource we created. */
	resource->private = private;
	resource->res_handle = result.res_handle;
	resource->res_base_mem = (void *)result.res_mem;
	resource->res_size = alloc.base_unit * alloc.num_unit;
	resource->res_cached = cached;
	resource->map = map;

	/*
	 * Kernel/user GUID. This global identifier is used for mmap'ing the
	 * allocated region from user space; it is passed as the mmap'ing
	 * offset, and we use it to 'hide' the videocore handle/address.
	 */
	mutex_lock(&sm_state->lock);
	resource->res_guid = ++sm_state->guid;
	mutex_unlock(&sm_state->lock);
	resource->res_guid <<= PAGE_SHIFT;

	vmcs_sm_add_resource(private, resource);

	pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
		 __func__, resource->res_guid, resource->res_handle,
		 resource->res_base_mem, resource->res_size,
		 resource->res_cached);

	/* We're done */
	resource->res_stats[ALLOC]++;
	ioparam->handle = resource->res_guid;
	return 0;

error:
	pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
	       __func__, alloc.name, ret, alloc.type, ioparam->size,
	       alloc.base_unit, alloc.num_unit, alloc.alignement);
	if (resource != NULL) {
		vc_sm_resource_deceased(resource, 1);
		kfree(resource);
	}
	return ret;
}
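/*
 * Hypothetical userspace usage sketch (illustration only; it assumes the
 * VMCS_SM_IOCTL_MEM_ALLOC request code from
 * <linux/broadcom/vmcs_sm_ioctl.h> and omits error handling):
 *
 *	struct vmcs_sm_ioctl_alloc a = { .size = 4096, .num = 1,
 *					 .cached = VMCS_SM_CACHE_HOST };
 *	int fd = open("/dev/vcsm", O_RDWR);
 *	ioctl(fd, VMCS_SM_IOCTL_MEM_ALLOC, &a);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, a.handle);
 *
 * The GUID returned in a.handle doubles as the mmap offset, which is why
 * it is shifted left by PAGE_SHIFT above.
 */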

/* Share an allocated memory handle and block. */
int vc_sm_ioctl_alloc_share(struct sm_priv_data_t *private,
			    struct vmcs_sm_ioctl_alloc_share *ioparam)
{
	struct sm_resource_t *resource, *shared_resource;
	int ret = 0;

	pr_debug("[%s]: attempt to share resource %u\n", __func__,
		 ioparam->handle);

	shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
	if (shared_resource == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Allocate local resource to track this allocation. */
	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (resource == NULL) {
		pr_err("[%s]: failed to allocate local tracking resource\n",
		       __func__);
		ret = -ENOMEM;
		goto error;
	}
	INIT_LIST_HEAD(&resource->map_list);
	resource->ref_count++;
	resource->pid = current->tgid;

	/* Keep track of the resource we created. */
	resource->private = private;
	resource->res_handle = shared_resource->res_handle;
	resource->res_base_mem = shared_resource->res_base_mem;
	resource->res_size = shared_resource->res_size;
	resource->res_cached = shared_resource->res_cached;
	resource->res_shared = shared_resource;

	mutex_lock(&sm_state->lock);
	resource->res_guid = ++sm_state->guid;
	mutex_unlock(&sm_state->lock);
	resource->res_guid <<= PAGE_SHIFT;

	vmcs_sm_add_resource(private, resource);

	pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
		 __func__, resource->res_guid, resource->res_handle,
		 resource->res_base_mem, resource->res_size,
		 resource->res_cached);

	/* We're done */
	resource->res_stats[ALLOC]++;
	ioparam->handle = resource->res_guid;
	ioparam->size = resource->res_size;
	return 0;

error:
	pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
	if (shared_resource != NULL)
		vmcs_sm_release_resource(shared_resource, 0);

	return ret;
}

/* Free a previously allocated shared memory handle and block. */
static int vc_sm_ioctl_free(struct sm_priv_data_t *private,
			    struct vmcs_sm_ioctl_free *ioparam)
{
	struct sm_resource_t *resource =
		vmcs_sm_acquire_resource(private, ioparam->handle);

	if (resource == NULL) {
		pr_err("[%s]: resource for guid %u does not exist\n", __func__,
		       ioparam->handle);
		return -EINVAL;
	}

	/* Check permissions. */
	if (resource->pid && (resource->pid != current->tgid)) {
		pr_err("[%s]: current tgid %u != %u owner\n",
		       __func__, current->tgid, resource->pid);
		vmcs_sm_release_resource(resource, 0);
		return -EPERM;
	}

	vmcs_sm_release_resource(resource, 0);
	vmcs_sm_release_resource(resource, 0);
	return 0;
}

/* Resize a previously allocated shared memory handle and block. */
static int vc_sm_ioctl_resize(struct sm_priv_data_t *private,
			      struct vmcs_sm_ioctl_resize *ioparam)
{
	int ret = 0;
	int status;
	struct vc_sm_resize_t resize;
	struct sm_resource_t *resource;

	/* Locate resource from GUID. */
	resource = vmcs_sm_acquire_resource(private, ioparam->handle);
	if (!resource) {
		pr_err("[%s]: failed resource - guid %x\n",
		       __func__, ioparam->handle);
		ret = -EFAULT;
		goto error;
	}

	/*
	 * If the resource is locked, its lock count will be non-zero, in
	 * which case we will not be allowed to resize it anyway, so reject
	 * the attempt here.
	 */
	if (resource->lock_count != 0) {
		pr_err("[%s]: cannot resize - guid %x, lock-cnt %d\n",
		       __func__, ioparam->handle, resource->lock_count);
		ret = -EFAULT;
		goto error;
	}

	/* Check permissions. */
	if (resource->pid && (resource->pid != current->tgid)) {
		pr_err("[%s]: current tgid %u != %u owner\n", __func__,
		       current->tgid, resource->pid);
		ret = -EPERM;
		goto error;
	}

	if (resource->map_count != 0) {
		pr_err("[%s]: cannot resize - guid %x, map-cnt %d\n",
		       __func__, ioparam->handle, resource->map_count);
		ret = -EFAULT;
		goto error;
	}

	resize.res_handle = resource->res_handle;
	resize.res_mem = (uint32_t)resource->res_base_mem;
	resize.res_new_size = ioparam->new_size;

	pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
		 __func__, ioparam->handle, resize.res_handle,
		 (void *)resize.res_mem);

	/* Resize the videocore allocated resource. */
	status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
				   &private->int_trans_id);
	if (status == -EINTR) {
		pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
			 __func__, private->int_trans_id);
		ret = -ERESTARTSYS;
		private->restart_sys = -EINTR;
		private->int_action = VC_SM_MSG_TYPE_RESIZE;
		goto error;
	} else if (status) {
		pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
		       __func__, status, private->int_trans_id);
		ret = -EPERM;
		goto error;
	}

	pr_debug("[%s]: successfully resized data - hdl %x, size %d -> %d\n",
		 __func__, resize.res_handle, resource->res_size,
		 resize.res_new_size);

	/* Successfully resized, save the information and inform the user. */
	ioparam->old_size = resource->res_size;
	resource->res_size = resize.res_new_size;

error:
	if (resource)
		vmcs_sm_release_resource(resource, 0);

	return ret;
}
1859
1860/* Lock a previously allocated shared memory handle and block. */
1861static int vc_sm_ioctl_lock(struct sm_priv_data_t *private,
1862 struct vmcs_sm_ioctl_lock_unlock *ioparam,
1863 int change_cache, enum vmcs_sm_cache_e cache_type,
1864 unsigned int vc_addr)
1865{
1866 int status;
1867 struct vc_sm_lock_unlock_t lock;
1868 struct vc_sm_lock_result_t result;
1869 struct sm_resource_t *resource;
1870 int ret = 0;
1871 struct sm_mmap *map, *map_tmp;
1872 unsigned long phys_addr;
1873
1874 map = NULL;
1875
1876 /* Locate resource from GUID. */
1877 resource = vmcs_sm_acquire_resource(private, ioparam->handle);
1878 if (resource == NULL) {
1879 ret = -EINVAL;
1880 goto error;
1881 }
1882
1883 /* Check permissions. */
1884 if (resource->pid && (resource->pid != current->tgid)) {
1885 pr_err("[%s]: current tgid %u != %u owner\n", __func__,
1886 current->tgid, resource->pid);
1887 ret = -EPERM;
1888 goto error;
1889 }
1890
1891 lock.res_handle = resource->res_handle;
1892 lock.res_mem = (uint32_t)resource->res_base_mem;
1893
1894 /* Take the lock and get the address to be mapped. */
1895 if (vc_addr == 0) {
1896 pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
1897 __func__, ioparam->handle, lock.res_handle,
1898 (void *)lock.res_mem);
1899
1900 /* Lock the videocore allocated resource. */
1901 status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
1902 &private->int_trans_id);
1903 if (status == -EINTR) {
1904 pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
1905 __func__, private->int_trans_id);
1906 ret = -ERESTARTSYS;
1907 private->restart_sys = -EINTR;
1908 private->int_action = VC_SM_MSG_TYPE_LOCK;
1909 goto error;
1910 } else if (status ||
1911 !result.res_mem) {
1912 pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
1913 __func__, status, private->int_trans_id);
1914 ret = -EPERM;
1915 resource->res_stats[LOCK_FAIL]++;
1916 goto error;
1917 }
1918
1919 pr_debug("[%s]: successfully locked data - hdl %x, base address %p (%p), lock-cnt %d\n",
1920 __func__, lock.res_handle, (void *)result.res_mem,
1921 (void *)lock.res_mem, resource->lock_count);
1922 }
1923 /* Lock assumed taken already, address to be mapped is known. */
1924 else
1925 resource->res_base_mem = (void *)vc_addr;
1926
1927 resource->res_stats[LOCK]++;
1928 resource->lock_count++;
1929
1930 /* Keep track of the new base memory allocation if it has changed. */
1931 if ((vc_addr == 0) &&
1932 ((void *)result.res_mem) &&
1933 ((void *)result.res_old_mem) &&
1934 (result.res_mem != result.res_old_mem)) {
1935 resource->res_base_mem = (void *)result.res_mem;
1936
1937 /* Kernel allocated resources. */
1938 if (resource->pid == 0) {
1939 if (!list_empty(&resource->map_list)) {
1940 list_for_each_entry_safe(map, map_tmp,
1941 &resource->map_list,
1942 resource_map_list) {
1943 if (map->res_addr) {
1944 iounmap((void *)map->res_addr);
1945 map->res_addr = 0;
1946
1947 vmcs_sm_remove_map(sm_state,
1948 map->resource,
1949 map);
1950 break;
1951 }
1952 }
1953 }
1954 }
1955 }
1956
1957 if (change_cache)
1958 resource->res_cached = cache_type;
1959
1960 if (resource->map_count) {
1961 ioparam->addr =
1962 vmcs_sm_usr_address_from_pid_and_usr_handle(
1963 current->tgid, ioparam->handle);
1964
1965 pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
1966 __func__, resource->map_count, private->pid,
1967 current->tgid, ioparam->handle, ioparam->addr);
1968 } else {
1969 /* Kernel allocated resources. */
1970 if (resource->pid == 0) {
1971 pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
1972 __func__, ioparam->handle, lock.res_handle);
1973
1974 ioparam->addr = 0;
1975
1976 map = kzalloc(sizeof(*map), GFP_KERNEL);
1977 if (map == NULL) {
1978 pr_err("[%s]: failed allocating tracker\n",
1979 __func__);
1980 ret = -ENOMEM;
1981 goto error;
1982 } else {
1983 phys_addr = (uint32_t)resource->res_base_mem &
1984 0x3FFFFFFF;
1985 phys_addr += mm_vc_mem_phys_addr;
1986 if (resource->res_cached
1987 == VMCS_SM_CACHE_HOST) {
1988 ioparam->addr = (unsigned long)
1989 /* TODO - make cached work */
1990 ioremap_nocache(phys_addr,
1991 resource->res_size);
1992
1993 pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
1994 __func__, ioparam->handle,
1995 lock.res_handle, ioparam->addr);
1996 } else {
1997 ioparam->addr = (unsigned long)
1998 ioremap_nocache(phys_addr,
1999 resource->res_size);
2000
2001 pr_debug("[%s]: mapping kernel - guid %x, hdl %x - non-cached mapping %u\n",
2002 __func__, ioparam->handle,
2003 lock.res_handle, ioparam->addr);
2004 }
2005
2006 map->res_pid = 0;
2007 map->res_vc_hdl = resource->res_handle;
2008 map->res_usr_hdl = resource->res_guid;
2009 map->res_addr = ioparam->addr;
2010 map->resource = resource;
2011 map->vma = NULL;
2012
2013 vmcs_sm_add_map(sm_state, resource, map);
2014 }
2015 } else
2016 ioparam->addr = 0;
2017 }
2018
2019error:
2020 if (resource)
2021 vmcs_sm_release_resource(resource, 0);
2022
2023 return ret;
2024}
2025
2026/* Unlock a previously allocated shared memory handle and block. */
2027static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
2028 struct vmcs_sm_ioctl_lock_unlock *ioparam,
2029 int flush, int wait_reply, int no_vc_unlock)
2030{
2031 int status;
2032 struct vc_sm_lock_unlock_t unlock;
2033 struct sm_mmap *map, *map_tmp;
2034 struct sm_resource_t *resource;
2035 int ret = 0;
2036
2037 map = NULL;
2038
2039 /* Locate resource from GUID. */
2040 resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2041 if (resource == NULL) {
2042 ret = -EINVAL;
2043 goto error;
2044 }
2045
2046 /* Check permissions. */
2047 if (resource->pid && (resource->pid != current->tgid)) {
2048 pr_err("[%s]: current tgid %u != %u owner\n",
2049 __func__, current->tgid, resource->pid);
2050 ret = -EPERM;
2051 goto error;
2052 }
2053
2054 unlock.res_handle = resource->res_handle;
2055 unlock.res_mem = (uint32_t)resource->res_base_mem;
2056
2057 pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
2058 __func__, ioparam->handle, unlock.res_handle,
2059 (void *)unlock.res_mem);
2060
2061 /* User space allocated resources. */
2062 if (resource->pid) {
2063 /* Flush if requested */
2064 if (resource->res_cached && flush) {
2065 dma_addr_t phys_addr = 0;
2066
2067 resource->res_stats[FLUSH]++;
2068
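 /*
  * Masking with 0x3FFFFFFF strips the VideoCore bus-address alias
  * bits, leaving the offset into VC memory; adding
  * mm_vc_mem_phys_addr then yields the ARM-visible physical address.
  */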
2069 phys_addr =
2070 (dma_addr_t)((uint32_t)resource->res_base_mem &
2071 0x3FFFFFFF);
2072 phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
2073
2074 /* L1 cache flush */
2075 down_read(&current->mm->mmap_sem);
2076 list_for_each_entry(map, &resource->map_list,
2077 resource_map_list) {
2078 if (map->vma) {
2079 const unsigned long start = map->vma->vm_start;
2080 const unsigned long end = map->vma->vm_end;
2081
2082 ret = clean_invalid_mem_walk(start, end - start,
2083 VCSM_CACHE_OP_FLUSH);
2084 if (ret)
2085 goto error;
2086 }
2087 }
2088 up_read(&current->mm->mmap_sem);
2089
2090 /* L2 cache flush */
2091 outer_clean_range(phys_addr,
2092 phys_addr +
2093 (size_t) resource->res_size);
2094 }
2095
2096 /* We need to zap all the vmas associated with this resource */
2097 if (resource->lock_count == 1) {
2098 down_read(&current->mm->mmap_sem);
2099 list_for_each_entry(map, &resource->map_list,
2100 resource_map_list) {
2101 if (map->vma) {
2102 zap_vma_ptes(map->vma,
2103 map->vma->vm_start,
2104 map->vma->vm_end -
2105 map->vma->vm_start);
2106 }
2107 }
2108 up_read(&current->mm->mmap_sem);
2109 }
2110 }
2111 /* Kernel allocated resources. */
2112 else {
2113 /* Global + Taken in this context */
2114 if (resource->ref_count == 2) {
2115 if (!list_empty(&resource->map_list)) {
2116 list_for_each_entry_safe(map, map_tmp,
2117 &resource->map_list,
2118 resource_map_list) {
2119 if (map->res_addr) {
2120 if (flush &&
2121 (resource->res_cached ==
2122 VMCS_SM_CACHE_HOST)) {
2123 unsigned long phys_addr;
2124
2125 phys_addr = (uint32_t)
2126 resource->res_base_mem & 0x3FFFFFFF;
2127 phys_addr +=
2128 mm_vc_mem_phys_addr;
2129
2130 /* L1 cache flush */
2131 dmac_flush_range(
2132 (const void *)map->res_addr,
2133 (const void *)
2134 (map->res_addr +
2135 resource->res_size));
2136
2137 /* L2 cache flush */
2138 outer_clean_range(
2139 phys_addr,
2140 phys_addr +
2141 (size_t)
2142 resource->res_size);
2143 }
2144
2145 iounmap((void *)map->res_addr);
2146 map->res_addr = 0;
2147
2148 vmcs_sm_remove_map(sm_state,
2149 map->resource,
2150 map);
2151 break;
2152 }
2153 }
2154 }
2155 }
2156 }
2157
2158 if (resource->lock_count) {
2159 /* Bypass the videocore unlock. */
2160 if (no_vc_unlock)
2161 status = 0;
2162 /* Unlock the videocore allocated resource. */
2163 else {
2164 status =
2165 vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
2166 &private->int_trans_id,
2167 wait_reply);
2168 if (status == -EINTR) {
2169 pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
2170 __func__, private->int_trans_id);
2171
2172 ret = -ERESTARTSYS;
2173 resource->res_stats[UNLOCK]--;
2174 private->restart_sys = -EINTR;
2175 private->int_action = VC_SM_MSG_TYPE_UNLOCK;
2176 goto error;
2177 } else if (status != 0) {
2178 pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
2179 __func__, status, private->int_trans_id);
2180
2181 ret = -EPERM;
2182 resource->res_stats[UNLOCK_FAIL]++;
2183 goto error;
2184 }
2185 }
2186
2187 resource->res_stats[UNLOCK]++;
2188 resource->lock_count--;
2189 }
2190
2191 pr_debug("[%s]: successfully unlocked data - hdl %x, base address %p, lock-cnt %d\n",
2192 __func__, unlock.res_handle, (void *)unlock.res_mem,
2193 resource->lock_count);
2194
2195error:
2196 if (resource)
2197 vmcs_sm_release_resource(resource, 0);
2198
2199 return ret;
2200}
2201
2202/* Import a contiguous block of memory to be shared with VC. */
2203int vc_sm_ioctl_import_dmabuf(struct sm_priv_data_t *private,
2204 struct vmcs_sm_ioctl_import_dmabuf *ioparam,
2205 struct dma_buf *src_dma_buf)
2206{
2207 int ret = 0;
2208 int status;
2209 struct sm_resource_t *resource = NULL;
2210 struct vc_sm_import import = { 0 };
2211 struct vc_sm_import_result result = { 0 };
2212 struct dma_buf *dma_buf;
2213 struct dma_buf_attachment *attach = NULL;
2214 struct sg_table *sgt = NULL;
2215
2216 /* Setup our allocation parameters */
2217 if (src_dma_buf) {
2218 get_dma_buf(src_dma_buf);
2219 dma_buf = src_dma_buf;
2220 } else {
2221 dma_buf = dma_buf_get(ioparam->dmabuf_fd);
2222 }
2223 if (IS_ERR(dma_buf))
2224 return PTR_ERR(dma_buf);
2225
2226 attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
2227 if (IS_ERR(attach)) {
2228 ret = PTR_ERR(attach);
2229 goto error;
2230 }
2231
2232 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
2233 if (IS_ERR(sgt)) {
2234 ret = PTR_ERR(sgt);
2235 goto error;
2236 }
2237
2238 /* Verify that the address block is contiguous */
2239 if (sgt->nents != 1) {
2240 ret = -ENOMEM;
2241 goto error;
2242 }
2243
2244 import.type = ((ioparam->cached == VMCS_SM_CACHE_VC) ||
2245 (ioparam->cached == VMCS_SM_CACHE_BOTH)) ?
2246 VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
2247 import.addr = (uint32_t)sg_dma_address(sgt->sgl);
2248 import.size = sg_dma_len(sgt->sgl);
2249 import.allocator = current->tgid;
2250
2251 if (*ioparam->name)
2252 memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
2253 else
2254 memcpy(import.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
2255 sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
2256
2257 pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
2258 __func__, import.name, import.type,
2259 (void *)import.addr, import.size);
2260
2261 /* Allocate local resource to track this allocation. */
2262 resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2263 if (!resource) {
2264 ret = -ENOMEM;
2265 goto error;
2266 }
2267 INIT_LIST_HEAD(&resource->map_list);
2268 resource->ref_count++;
2269 resource->pid = current->tgid;
2270
2271 /* Allocate the videocore resource. */
2272 status = vc_vchi_sm_import(sm_state->sm_handle, &import, &result,
2273 &private->int_trans_id);
2274 if (status == -EINTR) {
2275 pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
2276 __func__, private->int_trans_id);
2277 ret = -ERESTARTSYS;
2278 private->restart_sys = -EINTR;
2279 private->int_action = VC_SM_MSG_TYPE_IMPORT;
2280 goto error;
2281 } else if (status || !result.res_handle) {
2282 pr_err("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
2283 __func__, status, private->int_trans_id);
2284 ret = -ENOMEM;
2285 resource->res_stats[ALLOC_FAIL]++;
2286 goto error;
2287 }
2288
2289 /* Keep track of the resource we created. */
2290 resource->private = private;
2291 resource->res_handle = result.res_handle;
2292 resource->res_size = import.size;
2293 resource->res_cached = ioparam->cached;
2294
2295 resource->dma_buf = dma_buf;
2296 resource->attach = attach;
2297 resource->sgt = sgt;
2298 resource->dma_addr = sg_dma_address(sgt->sgl);
2299
2300 /*
2301 * Kernel/user GUID. This global identifier is used for mmap'ing the
2302 * allocated region from user space, it is passed as the mmap'ing
2303 * offset, we use it to 'hide' the videocore handle/address.
2304 */
2305 mutex_lock(&sm_state->lock);
2306 resource->res_guid = ++sm_state->guid;
2307 mutex_unlock(&sm_state->lock);
2308 resource->res_guid <<= PAGE_SHIFT;
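 /*
  * A minimal user-space sketch of how this GUID round-trips (the
  * ioctl request name is an assumption from the uapi header; error
  * handling elided):
  *
  *	int fd = open("/dev/vcsm", O_RDWR);
  *	struct vmcs_sm_ioctl_import_dmabuf io = { .dmabuf_fd = buf_fd };
  *	ioctl(fd, VMCS_SM_IOCTL_MEM_IMPORT_DMABUF, &io);
  *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
  *		       MAP_SHARED, fd, io.handle);
  */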
2309
2310 vmcs_sm_add_resource(private, resource);
2311
2312 /* We're done */
2313 resource->res_stats[IMPORT]++;
2314 ioparam->handle = resource->res_guid;
2315 return 0;
2316
2317error:
2318 if (resource) {
2319 resource->res_stats[IMPORT_FAIL]++;
2320 vc_sm_resource_deceased(resource, 1);
2321 kfree(resource);
2322 }
2323 if (sgt)
2324 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
2325 if (attach)
2326 dma_buf_detach(dma_buf, attach);
2327 dma_buf_put(dma_buf);
2328 return ret;
2329}
2330
2331/* Handle control from host. */
2332static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2333{
2334 int ret = 0;
2335 unsigned int cmdnr = _IOC_NR(cmd);
2336 struct sm_priv_data_t *file_data =
2337 (struct sm_priv_data_t *)file->private_data;
2338 struct sm_resource_t *resource = NULL;
2339
2340 /* Validate we can work with this device. */
2341 if ((sm_state == NULL) || (file_data == NULL)) {
2342 pr_err("[%s]: invalid device\n", __func__);
2343 ret = -EPERM;
2344 goto out;
2345 }
2346
2347 pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
2348 current->tgid, file_data->pid);
2349
2350 /* Action is a re-post of a previously interrupted action? */
2351 if (file_data->restart_sys == -EINTR) {
2352 struct vc_sm_action_clean_t action_clean;
2353
2354 pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
2355 __func__, file_data->int_action,
2356 file_data->int_trans_id);
2357
2358 action_clean.res_action = file_data->int_action;
2359 action_clean.action_trans_id = file_data->int_trans_id;
2360
2361 vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
2362
2363 file_data->restart_sys = 0;
2364 }
2365
2366 /* Now process the command. */
2367 switch (cmdnr) {
2368 /* New memory allocation.
2369 */
2370 case VMCS_SM_CMD_ALLOC:
2371 {
2372 struct vmcs_sm_ioctl_alloc ioparam;
2373
2374 /* Get the parameter data. */
2375 if (copy_from_user
2376 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2377 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2378 __func__, cmdnr);
2379 ret = -EFAULT;
2380 goto out;
2381 }
2382
2383 ret = vc_sm_ioctl_alloc(file_data, &ioparam);
2384 if (!ret &&
2385 (copy_to_user((void *)arg,
2386 &ioparam, sizeof(ioparam)) != 0)) {
2387 struct vmcs_sm_ioctl_free freeparam = {
2388 ioparam.handle
2389 };
2390 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2391 __func__, cmdnr);
2392 vc_sm_ioctl_free(file_data, &freeparam);
2393 ret = -EFAULT;
2394 }
2395
2396 /* Done. */
2397 goto out;
2398 }
2399 break;
2400
2401 /* Share existing memory allocation. */
2402 case VMCS_SM_CMD_ALLOC_SHARE:
2403 {
2404 struct vmcs_sm_ioctl_alloc_share ioparam;
2405
2406 /* Get the parameter data. */
2407 if (copy_from_user
2408 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2409 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2410 __func__, cmdnr);
2411 ret = -EFAULT;
2412 goto out;
2413 }
2414
2415 ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
2416
2417 /* Copy result back to user. */
2418 if (!ret
2419 && copy_to_user((void *)arg, &ioparam,
2420 sizeof(ioparam)) != 0) {
2421 struct vmcs_sm_ioctl_free freeparam = {
2422 ioparam.handle
2423 };
2424 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2425 __func__, cmdnr);
2426 vc_sm_ioctl_free(file_data, &freeparam);
2427 ret = -EFAULT;
2428 }
2429
2430 /* Done. */
2431 goto out;
2432 }
2433 break;
2434
2435 case VMCS_SM_CMD_IMPORT_DMABUF:
2436 {
2437 struct vmcs_sm_ioctl_import_dmabuf ioparam;
2438
2439 /* Get the parameter data. */
2440 if (copy_from_user
2441 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2442 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2443 __func__, cmdnr);
2444 ret = -EFAULT;
2445 goto out;
2446 }
2447
2448 ret = vc_sm_ioctl_import_dmabuf(file_data, &ioparam,
2449 NULL);
2450 if (!ret &&
2451 (copy_to_user((void *)arg,
2452 &ioparam, sizeof(ioparam)) != 0)) {
2453 struct vmcs_sm_ioctl_free freeparam = {
2454 ioparam.handle
2455 };
2456 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2457 __func__, cmdnr);
2458 vc_sm_ioctl_free(file_data, &freeparam);
2459 ret = -EFAULT;
2460 }
2461
2462 /* Done. */
2463 goto out;
2464 }
2465 break;
2466
2467 /* Lock (attempt to) *and* register a cache behavior change. */
2468 case VMCS_SM_CMD_LOCK_CACHE:
2469 {
2470 struct vmcs_sm_ioctl_lock_cache ioparam;
2471 struct vmcs_sm_ioctl_lock_unlock lock;
2472
2473 /* Get parameter data. */
2474 if (copy_from_user
2475 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2476 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2477 __func__, cmdnr);
2478 ret = -EFAULT;
2479 goto out;
2480 }
2481
2482 lock.handle = ioparam.handle;
2483 ret =
2484 vc_sm_ioctl_lock(file_data, &lock, 1,
2485 ioparam.cached, 0);
2486
2487 /* Done. */
2488 goto out;
2489 }
2490 break;
2491
2492 /* Lock (attempt to) existing memory allocation. */
2493 case VMCS_SM_CMD_LOCK:
2494 {
2495 struct vmcs_sm_ioctl_lock_unlock ioparam;
2496
2497 /* Get parameter data. */
2498 if (copy_from_user
2499 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2500 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2501 __func__, cmdnr);
2502 ret = -EFAULT;
2503 goto out;
2504 }
2505
2506 ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
2507
2508 /* Copy result back to user. */
2509 if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
2510 != 0) {
2511 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2512 __func__, cmdnr);
2513 ret = -EFAULT;
2514 }
2515
2516 /* Done. */
2517 goto out;
2518 }
2519 break;
2520
2521 /* Unlock (attempt to) existing memory allocation. */
2522 case VMCS_SM_CMD_UNLOCK:
2523 {
2524 struct vmcs_sm_ioctl_lock_unlock ioparam;
2525
2526 /* Get parameter data. */
2527 if (copy_from_user
2528 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2529 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2530 __func__, cmdnr);
2531 ret = -EFAULT;
2532 goto out;
2533 }
2534
2535 ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
2536
2537 /* Done. */
2538 goto out;
2539 }
2540 break;
2541
2542 /* Resize (attempt to) existing memory allocation. */
2543 case VMCS_SM_CMD_RESIZE:
2544 {
2545 struct vmcs_sm_ioctl_resize ioparam;
2546
2547 /* Get parameter data. */
2548 if (copy_from_user
2549 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2550 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2551 __func__, cmdnr);
2552 ret = -EFAULT;
2553 goto out;
2554 }
2555
2556 ret = vc_sm_ioctl_resize(file_data, &ioparam);
2557
2558 /* Copy result back to user. */
2559 if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
2560 != 0) {
2561 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2562 __func__, cmdnr);
2563 ret = -EFAULT;
2564 }
2565 goto out;
2566 }
2567 break;
2568
2569 /* Terminate existing memory allocation.
2570 */
2571 case VMCS_SM_CMD_FREE:
2572 {
2573 struct vmcs_sm_ioctl_free ioparam;
2574
2575 /* Get parameter data.
2576 */
2577 if (copy_from_user
2578 (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
2579 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2580 __func__, cmdnr);
2581 ret = -EFAULT;
2582 goto out;
2583 }
2584
2585 ret = vc_sm_ioctl_free(file_data, &ioparam);
2586
2587 /* Done.
2588 */
2589 goto out;
2590 }
2591 break;
2592
2593 /* Walk allocation on videocore, information shows up in the
2594 ** videocore log.
2595 */
2596 case VMCS_SM_CMD_VC_WALK_ALLOC:
2597 {
2598 pr_debug("[%s]: invoking walk alloc\n", __func__);
2599
2600 if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
2601 pr_err("[%s]: failed to walk-alloc on videocore\n",
2602 __func__);
2603
2604 /* Done.
2605 */
2606 goto out;
2607 }
2608 break;
2609 /* Walk mapping table on host, information shows up in the
2610 ** kernel log.
2611 */
2612 case VMCS_SM_CMD_HOST_WALK_MAP:
2613 {
2614 /* Use pid of -1 to tell to walk the whole map. */
2615 vmcs_sm_host_walk_map_per_pid(-1);
2616
2617 /* Done. */
2618 goto out;
2619 }
2620 break;
2621
2622 /* Walk mapping table per process on host. */
2623 case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
2624 {
2625 struct vmcs_sm_ioctl_walk ioparam;
2626
2627 /* Get parameter data. */
2628 if (copy_from_user(&ioparam,
2629 (void *)arg, sizeof(ioparam)) != 0) {
2630 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2631 __func__, cmdnr);
2632 ret = -EFAULT;
2633 goto out;
2634 }
2635
2636 vmcs_sm_host_walk_alloc(file_data);
2637
2638 /* Done. */
2639 goto out;
2640 }
2641 break;
2642
2643 /* Walk allocation per process on host. */
2644 case VMCS_SM_CMD_HOST_WALK_PID_MAP:
2645 {
2646 struct vmcs_sm_ioctl_walk ioparam;
2647
2648 /* Get parameter data. */
2649 if (copy_from_user(&ioparam,
2650 (void *)arg, sizeof(ioparam)) != 0) {
2651 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2652 __func__, cmdnr);
2653 ret = -EFAULT;
2654 goto out;
2655 }
2656
2657 vmcs_sm_host_walk_map_per_pid(ioparam.pid);
2658
2659 /* Done. */
2660 goto out;
2661 }
2662 break;
2663
2664 /* Gets the size of the memory associated with a user handle. */
2665 case VMCS_SM_CMD_SIZE_USR_HANDLE:
2666 {
2667 struct vmcs_sm_ioctl_size ioparam;
2668
2669 /* Get parameter data. */
2670 if (copy_from_user(&ioparam,
2671 (void *)arg, sizeof(ioparam)) != 0) {
2672 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2673 __func__, cmdnr);
2674 ret = -EFAULT;
2675 goto out;
2676 }
2677
2678 /* Locate resource from GUID. */
2679 resource =
2680 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2681 if (resource != NULL) {
2682 ioparam.size = resource->res_size;
2683 vmcs_sm_release_resource(resource, 0);
2684 } else {
2685 ioparam.size = 0;
2686 }
2687
2688 if (copy_to_user((void *)arg,
2689 &ioparam, sizeof(ioparam)) != 0) {
2690 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2691 __func__, cmdnr);
2692 ret = -EFAULT;
2693 }
2694
2695 /* Done. */
2696 goto out;
2697 }
2698 break;
2699
2700 /* Verify we are dealing with a valid resource. */
2701 case VMCS_SM_CMD_CHK_USR_HANDLE:
2702 {
2703 struct vmcs_sm_ioctl_chk ioparam;
2704
2705 /* Get parameter data. */
2706 if (copy_from_user(&ioparam,
2707 (void *)arg, sizeof(ioparam)) != 0) {
2708 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2709 __func__, cmdnr);
2710
2711 ret = -EFAULT;
2712 goto out;
2713 }
2714
2715 /* Locate resource from GUID. */
2716 resource =
2717 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2718 if (resource == NULL)
2719 ret = -EINVAL;
2720 /*
2721 * If the resource is cacheable, return additional
2722 * information that may be needed to flush the cache.
2723 */
2724 else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
2725 (resource->res_cached == VMCS_SM_CACHE_BOTH)) {
2726 ioparam.addr =
2727 vmcs_sm_usr_address_from_pid_and_usr_handle
2728 (current->tgid, ioparam.handle);
2729 ioparam.size = resource->res_size;
2730 ioparam.cache = resource->res_cached;
2731 } else {
2732 ioparam.addr = 0;
2733 ioparam.size = 0;
2734 ioparam.cache = resource->res_cached;
2735 }
2736
2737 if (resource)
2738 vmcs_sm_release_resource(resource, 0);
2739
2740 if (copy_to_user((void *)arg,
2741 &ioparam, sizeof(ioparam)) != 0) {
2742 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2743 __func__, cmdnr);
2744 ret = -EFAULT;
2745 }
2746
2747 /* Done. */
2748 goto out;
2749 }
2750 break;
2751
2752 /*
2753 * Maps a user handle given the process and the virtual address.
2754 */
2755 case VMCS_SM_CMD_MAPPED_USR_HANDLE:
2756 {
2757 struct vmcs_sm_ioctl_map ioparam;
2758
2759 /* Get parameter data. */
2760 if (copy_from_user(&ioparam,
2761 (void *)arg, sizeof(ioparam)) != 0) {
2762 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2763 __func__, cmdnr);
2764
2765 ret = -EFAULT;
2766 goto out;
2767 }
2768
2769 ioparam.handle =
2770 vmcs_sm_usr_handle_from_pid_and_address(
2771 ioparam.pid, ioparam.addr);
2772
2773 resource =
2774 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2775 if ((resource != NULL)
2776 && ((resource->res_cached == VMCS_SM_CACHE_HOST)
2777 || (resource->res_cached ==
2778 VMCS_SM_CACHE_BOTH))) {
2779 ioparam.size = resource->res_size;
2780 } else {
2781 ioparam.size = 0;
2782 }
2783
2784 if (resource)
2785 vmcs_sm_release_resource(resource, 0);
2786
2787 if (copy_to_user((void *)arg,
2788 &ioparam, sizeof(ioparam)) != 0) {
2789 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2790 __func__, cmdnr);
2791 ret = -EFAULT;
2792 }
2793
2794 /* Done. */
2795 goto out;
2796 }
2797 break;
2798
2799 /*
2800 * Maps a videocore handle given process and virtual address.
2801 */
2802 case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
2803 {
2804 struct vmcs_sm_ioctl_map ioparam;
2805
2806 /* Get parameter data. */
2807 if (copy_from_user(&ioparam,
2808 (void *)arg, sizeof(ioparam)) != 0) {
2809 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2810 __func__, cmdnr);
2811 ret = -EFAULT;
2812 goto out;
2813 }
2814
2815 ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
2816 ioparam.pid, ioparam.addr);
2817
2818 if (copy_to_user((void *)arg,
2819 &ioparam, sizeof(ioparam)) != 0) {
2820 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2821 __func__, cmdnr);
2822
2823 ret = -EFAULT;
2824 }
2825
2826 /* Done. */
2827 goto out;
2828 }
2829 break;
2830
2831 /* Maps a videocore handle given process and user handle. */
2832 case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
2833 {
2834 struct vmcs_sm_ioctl_map ioparam;
2835
2836 /* Get parameter data. */
2837 if (copy_from_user(&ioparam,
2838 (void *)arg, sizeof(ioparam)) != 0) {
2839 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2840 __func__, cmdnr);
2841 ret = -EFAULT;
2842 goto out;
2843 }
2844
2845 /* Locate resource from GUID. */
2846 resource =
2847 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2848 if (resource != NULL) {
2849 ioparam.handle = resource->res_handle;
2850 vmcs_sm_release_resource(resource, 0);
2851 } else {
2852 ioparam.handle = 0;
2853 }
2854
2855 if (copy_to_user((void *)arg,
2856 &ioparam, sizeof(ioparam)) != 0) {
2857 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2858 __func__, cmdnr);
2859
2860 ret = -EFAULT;
2861 }
2862
2863 /* Done. */
2864 goto out;
2865 }
2866 break;
2867
2868 /*
2869 * Maps a videocore address given process and videocore handle.
2870 */
2871 case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
2872 {
2873 struct vmcs_sm_ioctl_map ioparam;
2874
2875 /* Get parameter data. */
2876 if (copy_from_user(&ioparam,
2877 (void *)arg, sizeof(ioparam)) != 0) {
2878 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2879 __func__, cmdnr);
2880
2881 ret = -EFAULT;
2882 goto out;
2883 }
2884
2885 /* Locate resource from GUID. */
2886 resource =
2887 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2888 if (resource != NULL) {
2889 ioparam.addr =
2890 (unsigned int)resource->res_base_mem;
2891 vmcs_sm_release_resource(resource, 0);
2892 } else {
2893 ioparam.addr = 0;
2894 }
2895
2896 if (copy_to_user((void *)arg,
2897 &ioparam, sizeof(ioparam)) != 0) {
2898 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2899 __func__, cmdnr);
2900 ret = -EFAULT;
2901 }
2902
2903 /* Done. */
2904 goto out;
2905 }
2906 break;
2907
2908 /* Maps a user address given process and vc handle. */
2909 case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
2910 {
2911 struct vmcs_sm_ioctl_map ioparam;
2912
2913 /* Get parameter data. */
2914 if (copy_from_user(&ioparam,
2915 (void *)arg, sizeof(ioparam)) != 0) {
2916 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2917 __func__, cmdnr);
2918 ret = -EFAULT;
2919 goto out;
2920 }
2921
2922 /*
2923 * Return the address information from the mapping,
2924 * or 0 (i.e. NULL) if the actual mapping cannot be located.
2925 */
2926 ioparam.addr =
2927 vmcs_sm_usr_address_from_pid_and_usr_handle
2928 (ioparam.pid, ioparam.handle);
2929
2930 if (copy_to_user((void *)arg,
2931 &ioparam, sizeof(ioparam)) != 0) {
2932 pr_err("[%s]: failed to copy-to-user for cmd %x\n",
2933 __func__, cmdnr);
2934 ret = -EFAULT;
2935 }
2936
2937 /* Done. */
2938 goto out;
2939 }
2940 break;
2941
2942 /* Flush the cache for a given mapping. */
2943 case VMCS_SM_CMD_FLUSH:
2944 {
2945 struct vmcs_sm_ioctl_cache ioparam;
2946
2947 /* Get parameter data. */
2948 if (copy_from_user(&ioparam,
2949 (void *)arg, sizeof(ioparam)) != 0) {
2950 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2951 __func__, cmdnr);
2952 ret = -EFAULT;
2953 goto out;
2954 }
2955
2956 /* Locate resource from GUID. */
2957 resource =
2958 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2959 if (resource == NULL) {
2960 ret = -EINVAL;
2961 goto out;
2962 }
2963
2964 ret = clean_invalid_resource_walk((void __user *)ioparam.addr,
2965 ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
2966 resource);
2967 vmcs_sm_release_resource(resource, 0);
2968 if (ret)
2969 goto out;
2970 }
2971 break;
2972
2973 /* Invalidate the cache for a given mapping. */
2974 case VMCS_SM_CMD_INVALID:
2975 {
2976 struct vmcs_sm_ioctl_cache ioparam;
2977
2978 /* Get parameter data. */
2979 if (copy_from_user(&ioparam,
2980 (void *)arg, sizeof(ioparam)) != 0) {
2981 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
2982 __func__, cmdnr);
2983 ret = -EFAULT;
2984 goto out;
2985 }
2986
2987 /* Locate resource from GUID. */
2988 resource =
2989 vmcs_sm_acquire_resource(file_data, ioparam.handle);
2990 if (resource == NULL) {
2991 ret = -EINVAL;
2992 goto out;
2993 }
2994
2995 ret = clean_invalid_resource_walk((void __user *)ioparam.addr,
2996 ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
2997 vmcs_sm_release_resource(resource, 0);
2998 if (ret)
2999 goto out;
3000 }
3001 break;
3002
3003 /* Flush/Invalidate the cache for a given mapping. */
3004 case VMCS_SM_CMD_CLEAN_INVALID:
3005 {
3006 int i;
3007 struct vmcs_sm_ioctl_clean_invalid ioparam;
3008
3009 /* Get parameter data. */
3010 if (copy_from_user(&ioparam,
3011 (void *)arg, sizeof(ioparam)) != 0) {
3012 pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3013 __func__, cmdnr);
3014 ret = -EFAULT;
3015 goto out;
3016 }
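 /*
  * ioparam.s[] holds a fixed-size batch of cache ops; a
  * VCSM_CACHE_OP_NOP entry terminates the batch early.
  */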
3017 for (i = 0; i < ARRAY_SIZE(ioparam.s); i++) {
3018 if (ioparam.s[i].cmd == VCSM_CACHE_OP_NOP)
3019 break;
3020
3021 /* Locate resource from GUID. */
3022 resource =
3023 vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
3024 if (resource == NULL) {
3025 ret = -EINVAL;
3026 goto out;
3027 }
3028
3029 ret = clean_invalid_resource_walk(
3030 (void __user *)ioparam.s[i].addr, ioparam.s[i].size,
3031 ioparam.s[i].cmd, ioparam.s[i].handle, resource);
3032 vmcs_sm_release_resource(resource, 0);
3033 if (ret)
3034 goto out;
3035 }
3036 }
3037 break;
3038 /*
3039 * Flush/Invalidate the cache for a given mapping.
3040 * Blocks must be pinned (i.e. accessed) before this call.
3041 */
3042 case VMCS_SM_CMD_CLEAN_INVALID2:
3043 {
3044 int i;
3045 struct vmcs_sm_ioctl_clean_invalid2 ioparam;
3046 struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
3047
3048 /* Get parameter data. */
3049 if (copy_from_user(&ioparam,
3050 (void *)arg, sizeof(ioparam)) != 0) {
3051 pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
3052 __func__, cmdnr);
3053 ret = -EFAULT;
3054 goto out;
3055 }
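 /*
  * The user buffer is the header just copied, immediately followed
  * by op_count block descriptors; fetch those next.
  */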
3056 block = kmalloc_array(ioparam.op_count,
3057 sizeof(struct vmcs_sm_ioctl_clean_invalid_block),
3058 GFP_KERNEL);
3059 if (!block) {
3060 ret = -ENOMEM;
3061 goto out;
3062 }
3063 if (copy_from_user(block, (void *)(arg + sizeof(ioparam)),
3064 ioparam.op_count * sizeof(*block)) != 0) {
3065 pr_err("[%s]: failed to copy-from-user payload for cmd %x\n", __func__, cmdnr);
3066 ret = -EFAULT;
3067 kfree(block);
3068 goto out;
3069 }
3070
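 /*
  * Each descriptor names a 2D region: block_count blocks of
  * block_size bytes, spaced inter_block_stride bytes apart.
  */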
3071 for (i = 0; i < ioparam.op_count; i++) {
3072 const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
3073
3074 if (op->invalidate_mode == VCSM_CACHE_OP_NOP)
3075 continue;
3076
3077 ret = clean_invalid_contiguous_mem_2d(
3078 (void __user *)op->start_address, op->block_count,
3079 op->block_size, op->inter_block_stride,
3080 op->invalidate_mode);
3081 if (ret)
3082 break;
3083 }
3084 kfree(block);
3085 }
3086 break;
3087
3088 default:
3089 {
3090 ret = -EINVAL;
3091 goto out;
3092 }
3093 break;
3094 }
3095
3096out:
3097 return ret;
3098}
3099
3100/* Device operations that we manage in this driver. */
3101static const struct file_operations vmcs_sm_ops = {
3102 .owner = THIS_MODULE,
3103 .unlocked_ioctl = vc_sm_ioctl,
3104 .open = vc_sm_open,
3105 .release = vc_sm_release,
3106 .mmap = vc_sm_mmap,
3107};
3108
3109/* Creation of device. */
3110static int vc_sm_create_sharedmemory(void)
3111{
3112 int ret;
3113
3114 if (sm_state == NULL) {
3115 ret = -ENOMEM;
3116 goto out;
3117 }
3118
3119 /* Create a device class for creating dev nodes. */
3120 sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
3121 if (IS_ERR(sm_state->sm_class)) {
3122 pr_err("[%s]: unable to create device class\n", __func__);
3123 ret = PTR_ERR(sm_state->sm_class);
3124 goto out;
3125 }
3126
3127 /* Create a character driver. */
3128 ret = alloc_chrdev_region(&sm_state->sm_devid,
3129 DEVICE_MINOR, 1, DEVICE_NAME);
3130 if (ret != 0) {
3131 pr_err("[%s]: unable to allocate device number\n", __func__);
3132 goto out_dev_class_destroy;
3133 }
3134
3135 cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
3136 ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
3137 if (ret != 0) {
3138 pr_err("[%s]: unable to register device\n", __func__);
3139 goto out_chrdev_unreg;
3140 }
3141
3142 /* Create a device node. */
3143 sm_state->sm_dev = device_create(sm_state->sm_class,
3144 NULL,
3145 MKDEV(MAJOR(sm_state->sm_devid),
3146 DEVICE_MINOR), NULL,
3147 DEVICE_NAME);
3148 if (IS_ERR(sm_state->sm_dev)) {
3149 pr_err("[%s]: unable to create device node\n", __func__);
3150 ret = PTR_ERR(sm_state->sm_dev);
3151 goto out_chrdev_del;
3152 }
3153
3154 goto out;
3155
3156out_chrdev_del:
3157 cdev_del(&sm_state->sm_cdev);
3158out_chrdev_unreg:
3159 unregister_chrdev_region(sm_state->sm_devid, 1);
3160out_dev_class_destroy:
3161 class_destroy(sm_state->sm_class);
3162 sm_state->sm_class = NULL;
3163out:
3164 return ret;
3165}
3166
3167/* Termination of the device. */
3168static int vc_sm_remove_sharedmemory(void)
3169{
3170 int ret;
3171
3172 if (sm_state == NULL) {
3173 /* Nothing to do. */
3174 ret = 0;
3175 goto out;
3176 }
3177
3178 /* Remove the sharedmemory character driver. */
3179 cdev_del(&sm_state->sm_cdev);
3180
3181 /* Unregister region. */
3182 unregister_chrdev_region(sm_state->sm_devid, 1);
3183
3184 ret = 0;
3185 goto out;
3186
3187out:
3188 return ret;
3189}
3190
3191/* Videocore connected. */
3192static void vc_sm_connected_init(void)
3193{
3194 int ret;
3195 VCHI_INSTANCE_T vchi_instance;
3196 VCHI_CONNECTION_T *vchi_connection = NULL;
3197
3198 pr_info("[%s]: start\n", __func__);
3199
3200 /*
3201 * Initialize and create a VCHI connection for the shared memory service
3202 * running on videocore.
3203 */
3204 ret = vchi_initialise(&vchi_instance);
3205 if (ret != 0) {
3206 pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
3207 __func__, ret);
3208
3209 ret = -EIO;
3210 goto err_free_mem;
3211 }
3212
3213 ret = vchi_connect(NULL, 0, vchi_instance);
3214 if (ret != 0) {
3215 pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
3216 __func__, ret);
3217
3218 ret = -EIO;
3219 goto err_free_mem;
3220 }
3221
3222 /* Initialize an instance of the shared memory service. */
3223 sm_state->sm_handle =
3224 vc_vchi_sm_init(vchi_instance, &vchi_connection, 1);
3225 if (sm_state->sm_handle == NULL) {
3226 pr_err("[%s]: failed to initialize shared memory service\n",
3227 __func__);
3228
3229 ret = -EPERM;
3230 goto err_free_mem;
3231 }
3232
3233 /* Create a debug fs directory entry (root). */
3234 sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
3235 if (!sm_state->dir_root) {
3236 pr_err("[%s]: failed to create \'%s\' directory entry\n",
3237 __func__, VC_SM_DIR_ROOT_NAME);
3238
3239 ret = -EPERM;
3240 goto err_stop_sm_service;
3241 }
3242
3243 sm_state->dir_state.show = &vc_sm_global_state_show;
3244 sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
3245 0444, sm_state->dir_root, &sm_state->dir_state,
3246 &vc_sm_debug_fs_fops);
3247
3248 sm_state->dir_stats.show = &vc_sm_global_statistics_show;
3249 sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
3250 0444, sm_state->dir_root, &sm_state->dir_stats,
3251 &vc_sm_debug_fs_fops);
3252
3253 /* Create the proc entry children. */
3254 sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
3255 sm_state->dir_root);
3256
3257 /* Create a shared memory device. */
3258 ret = vc_sm_create_sharedmemory();
3259 if (ret != 0) {
3260 pr_err("[%s]: failed to create shared memory device\n",
3261 __func__);
3262 goto err_remove_debugfs;
3263 }
3264
3265 INIT_LIST_HEAD(&sm_state->map_list);
3266 INIT_LIST_HEAD(&sm_state->resource_list);
3267
3268 sm_state->data_knl = vc_sm_create_priv_data(0);
3269 if (sm_state->data_knl == NULL) {
3270 pr_err("[%s]: failed to create kernel private data tracker\n",
3271 __func__);
3272 goto err_remove_shared_memory;
3273 }
3274
3275 /* Done! */
3276 sm_inited = 1;
3277 goto out;
3278
3279err_remove_shared_memory:
3280 vc_sm_remove_sharedmemory();
3281err_remove_debugfs:
3282 debugfs_remove_recursive(sm_state->dir_root);
3283err_stop_sm_service:
3284 vc_vchi_sm_stop(&sm_state->sm_handle);
3285err_free_mem:
3286 kfree(sm_state);
3287out:
3288 pr_info("[%s]: end - returning %d\n", __func__, ret);
3289}
3290
3291/* Driver loading. */
3292static int bcm2835_vcsm_probe(struct platform_device *pdev)
3293{
3294 pr_info("vc-sm: Videocore shared memory driver\n");
3295
3296 sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
3297 if (!sm_state)
3298 return -ENOMEM;
3299 sm_state->pdev = pdev;
3300 mutex_init(&sm_state->lock);
3301 mutex_init(&sm_state->map_lock);
3302
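 /*
  * The remaining initialisation (VCHI connection, /dev node, debugfs)
  * is deferred to vc_sm_connected_init(), which runs once VCHIQ
  * reports that the VideoCore side is up.
  */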
3303 vchiq_add_connected_callback(vc_sm_connected_init);
3304 return 0;
3305}
3306
3307/* Driver unloading. */
3308static int bcm2835_vcsm_remove(struct platform_device *pdev)
3309{
3310 pr_debug("[%s]: start\n", __func__);
3311 if (sm_inited) {
3312 /* Remove shared memory device. */
3313 vc_sm_remove_sharedmemory();
3314
3315 /* Remove all proc entries. */
3316 debugfs_remove_recursive(sm_state->dir_root);
3317
3318 /* Stop the videocore shared memory service. */
3319 vc_vchi_sm_stop(&sm_state->sm_handle);
3320
3321 /* Free the memory for the state structure. */
3322 mutex_destroy(&(sm_state->map_lock));
3323 kfree(sm_state);
3324 }
3325
3326 pr_debug("[%s]: end\n", __func__);
3327 return 0;
3328}
3329
3330#if defined(__KERNEL__)
3331/* Allocate a shared memory handle and block. */
3332int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle)
3333{
3334 struct vmcs_sm_ioctl_alloc ioparam = { 0 };
3335 int ret;
3336 struct sm_resource_t *resource;
3337
3338 /* Validate we can work with this device. */
3339 if (sm_state == NULL || alloc == NULL || handle == NULL) {
3340 pr_err("[%s]: invalid input\n", __func__);
3341 return -EPERM;
3342 }
3343
3344 ioparam.size = alloc->base_unit;
3345 ioparam.num = alloc->num_unit;
3346 ioparam.cached =
3347 alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
3348
3349 ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
3350
3351 if (ret == 0) {
3352 resource =
3353 vmcs_sm_acquire_resource(sm_state->data_knl,
3354 ioparam.handle);
3355 if (resource) {
3356 resource->pid = 0;
3357 vmcs_sm_release_resource(resource, 0);
3358
3359 /* Assign valid handle at this time. */
3360 *handle = ioparam.handle;
3361 } else {
3362 ret = -ENOMEM;
3363 }
3364 }
3365
3366 return ret;
3367}
3368EXPORT_SYMBOL_GPL(vc_sm_alloc);
3369
3370/* Get an internal resource handle mapped from the external one. */
3371int vc_sm_int_handle(int handle)
3372{
3373 struct sm_resource_t *resource;
3374 int ret = 0;
3375
3376 /* Validate we can work with this device. */
3377 if (sm_state == NULL || handle == 0) {
3378 pr_err("[%s]: invalid input\n", __func__);
3379 return 0;
3380 }
3381
3382 /* Locate resource from GUID. */
3383 resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
3384 if (resource) {
3385 ret = resource->res_handle;
3386 vmcs_sm_release_resource(resource, 0);
3387 }
3388
3389 return ret;
3390}
3391EXPORT_SYMBOL_GPL(vc_sm_int_handle);
3392
3393/* Free a previously allocated shared memory handle and block. */
3394int vc_sm_free(int handle)
3395{
3396 struct vmcs_sm_ioctl_free ioparam = { handle };
3397
3398 /* Validate we can work with this device. */
3399 if (sm_state == NULL || handle == 0) {
3400 pr_err("[%s]: invalid input\n", __func__);
3401 return -EPERM;
3402 }
3403
3404 return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
3405}
3406EXPORT_SYMBOL_GPL(vc_sm_free);
3407
3408/* Lock a memory handle for use by kernel. */
3409int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
3410 unsigned long *data)
3411{
3412 struct vmcs_sm_ioctl_lock_unlock ioparam;
3413 int ret;
3414
3415 /* Validate we can work with this device. */
3416 if (sm_state == NULL || handle == 0 || data == NULL) {
3417 pr_err("[%s]: invalid input\n", __func__);
3418 return -EPERM;
3419 }
3420
3421 *data = 0;
3422
3423 ioparam.handle = handle;
3424 ret = vc_sm_ioctl_lock(sm_state->data_knl,
3425 &ioparam,
3426 1,
3427 ((mode ==
3428 VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
3429 VMCS_SM_CACHE_NONE), 0);
3430
3431 *data = ioparam.addr;
3432 return ret;
3433}
3434EXPORT_SYMBOL_GPL(vc_sm_lock);
3435
3436/* Unlock a memory handle in use by kernel. */
3437int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
3438{
3439 struct vmcs_sm_ioctl_lock_unlock ioparam;
3440
3441 /* Validate we can work with this device. */
3442 if (sm_state == NULL || handle == 0) {
3443 pr_err("[%s]: invalid input\n", __func__);
3444 return -EPERM;
3445 }
3446
3447 ioparam.handle = handle;
3448 return vc_sm_ioctl_unlock(sm_state->data_knl,
3449 &ioparam, flush, 0, no_vc_unlock);
3450}
3451EXPORT_SYMBOL_GPL(vc_sm_unlock);
3452
3453/* Map a shared memory region for use by kernel. */
3454int vc_sm_map(int handle, unsigned int sm_addr,
3455 enum vc_sm_lock_cache_mode mode, unsigned long *data)
3456{
3457 struct vmcs_sm_ioctl_lock_unlock ioparam;
3458 int ret;
3459
3460 /* Validate we can work with this device. */
3461 if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
3462 pr_err("[%s]: invalid input\n", __func__);
3463 return -EPERM;
3464 }
3465
3466 *data = 0;
3467
3468 ioparam.handle = handle;
3469 ret = vc_sm_ioctl_lock(sm_state->data_knl,
3470 &ioparam,
3471 1,
3472 ((mode ==
3473 VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
3474 VMCS_SM_CACHE_NONE), sm_addr);
3475
3476 *data = ioparam.addr;
3477 return ret;
3478}
3479EXPORT_SYMBOL_GPL(vc_sm_map);
3480
3481/* Import a dmabuf to be shared with VC. */
3482int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle)
3483{
3484 struct vmcs_sm_ioctl_import_dmabuf ioparam = { 0 };
3485 int ret;
3486 struct sm_resource_t *resource;
3487
3488 /* Validate we can work with this device. */
3489 if (!sm_state || !dmabuf || !handle) {
3490 pr_err("[%s]: invalid input\n", __func__);
3491 return -EPERM;
3492 }
3493
3494 ioparam.cached = 0;
3495 strcpy(ioparam.name, "KRNL DMABUF");
3496
3497 ret = vc_sm_ioctl_import_dmabuf(sm_state->data_knl, &ioparam, dmabuf);
3498
3499 if (!ret) {
3500 resource = vmcs_sm_acquire_resource(sm_state->data_knl,
3501 ioparam.handle);
3502 if (resource) {
3503 resource->pid = 0;
3504 vmcs_sm_release_resource(resource, 0);
3505
3506 /* Assign valid handle at this time.*/
3507 *handle = ioparam.handle;
3508 } else {
3509 ret = -ENOMEM;
3510 }
3511 }
3512
3513 return ret;
3514}
3515EXPORT_SYMBOL_GPL(vc_sm_import_dmabuf);
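
/*
 * A minimal in-kernel usage sketch of the API exported above (illustrative
 * only; the allocation parameters are assumptions and error handling is
 * elided):
 *
 *	struct vc_sm_alloc_t alloc = {
 *		.type = VC_SM_ALLOC_CACHED,
 *		.base_unit = PAGE_SIZE,
 *		.num_unit = 1,
 *	};
 *	int handle;
 *	unsigned long addr;
 *
 *	if (!vc_sm_alloc(&alloc, &handle) &&
 *	    !vc_sm_lock(handle, VC_SM_LOCK_CACHED, &addr)) {
 *		... use the kernel mapping at addr ...
 *		vc_sm_unlock(handle, 1, 0);
 *	}
 *	vc_sm_free(handle);
 */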
3516#endif
3517
3518/*
3519 * Register the driver with device tree
3520 */
3521
3522static const struct of_device_id bcm2835_vcsm_of_match[] = {
3523 {.compatible = "raspberrypi,bcm2835-vcsm",},
3524 { /* sentinel */ },
3525};
3526
3527MODULE_DEVICE_TABLE(of, bcm2835_vcsm_of_match);
3528
3529static struct platform_driver bcm2835_vcsm_driver = {
3530 .probe = bcm2835_vcsm_probe,
3531 .remove = bcm2835_vcsm_remove,
3532 .driver = {
3533 .name = DRIVER_NAME,
3534 .owner = THIS_MODULE,
3535 .of_match_table = bcm2835_vcsm_of_match,
3536 },
3537};
3538
3539module_platform_driver(bcm2835_vcsm_driver);
3540
3541MODULE_AUTHOR("Broadcom");
3542MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
3543MODULE_LICENSE("GPL v2");