]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
ARM64: Fix bad cast in vc04_services
[mirror_ubuntu-zesty-kernel.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_2835_arm.c
1 /**
2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer,
9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without
15 * specific prior written permission.
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/interrupt.h>
38 #include <linux/pagemap.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/version.h>
41 #include <linux/io.h>
42 #include <linux/platform_device.h>
43 #include <linux/uaccess.h>
44 #include <linux/of.h>
45 #include <asm/pgtable.h>
46 #include <soc/bcm2835/raspberrypi-firmware.h>
47
48 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
49
50 #include "vchiq_arm.h"
51 #include "vchiq_2835.h"
52 #include "vchiq_connected.h"
53 #include "vchiq_killable.h"
54
55 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
56
57 #define BELL0 0x00
58 #define BELL2 0x08
59
/* Per-VCHIQ-state private data for the 2835 platform: wraps the generic
 * ARM state and records whether platform-state initialisation completed.
 */
typedef struct vchiq_2835_state_struct {
	int inited;			/* non-zero once vchiq_platform_init_state succeeds */
	VCHIQ_ARM_STATE_T arm_state;	/* generic ARM-side state, see vchiq_arm_init_state */
} VCHIQ_2835_ARM_STATE_T;
64
/* Book-keeping for one bulk-transfer pagelist.  The structure itself lives
 * at the tail of the single coherent allocation made by create_pagelist()
 * and is torn down by cleaup_pagelistinfo().
 */
struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;		/* pagelist handed to the VideoCore */
	size_t pagelist_buffer_size;	/* size of the whole coherent allocation */
	dma_addr_t dma_addr;		/* bus address of that allocation */
	enum dma_data_direction dma_dir; /* DMA_TO_DEVICE (write) or DMA_FROM_DEVICE (read) */
	unsigned int num_pages;		/* number of pages covered by the transfer */
	unsigned int pages_need_release; /* set when pages were pinned by get_user_pages */
	struct page **pages;		/* page array, carved from the same allocation */
	struct scatterlist *scatterlist; /* one sg entry per page */
	unsigned int scatterlist_mapped; /* set while the dma_map_sg mapping is live */
};
76
77 static void __iomem *g_regs;
78 static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
79 static unsigned int g_fragments_size;
80 static char *g_fragments_base;
81 static char *g_free_fragments;
82 static struct semaphore g_free_fragments_sema;
83 static struct device *g_dev;
84
85 extern int vchiq_arm_log_level;
86
87 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
88
89 static irqreturn_t
90 vchiq_doorbell_irq(int irq, void *dev_id);
91
92 static struct vchiq_pagelist_info *
93 create_pagelist(char __user *buf, size_t count, unsigned short type,
94 struct task_struct *task);
95
96 static void
97 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
98 int actual);
99
/* Probe-time platform initialisation.
 *
 * Allocates the shared message slots plus the bulk-fragment pool in one
 * coherent DMA buffer, chains the fragments into a free list, maps the
 * doorbell registers, requests the doorbell IRQ, and finally tells the
 * VideoCore firmware the bus address of the slots.
 *
 * Returns 0 on success or a negative errno.  Allocations use devm/dmam
 * managed APIs, so the error paths need no explicit cleanup.
 */
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	/* The DT may override the compile-time cache line size default. */
	(void)of_property_read_u32(dev->of_node, "cache-line-size",
				   &g_cache_line_size);
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the firmware where the fragment pool sits inside the buffer. */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;
	slot_mem_size += frag_mem_size;

	/* Chain the fragment blocks into a singly-linked free list. */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* The firmware zeroes channelbase on success; non-zero means it
	 * rejected the address.
	 */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %pK, phys %pad)",
		vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
197
198 VCHIQ_STATUS_T
199 vchiq_platform_init_state(VCHIQ_STATE_T *state)
200 {
201 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
202 state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
203 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
204 status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
205 if(status != VCHIQ_SUCCESS)
206 {
207 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
208 }
209 return status;
210 }
211
212 VCHIQ_ARM_STATE_T*
213 vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
214 {
215 if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
216 {
217 BUG();
218 }
219 return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
220 }
221
/* Signal a remote event to the VideoCore.
 *
 * The wmb() orders any earlier shared-memory writes before the fired
 * flag becomes visible; the dsb(sy) ensures the flag write has reached
 * memory before the doorbell register is hit.  Do not reorder.
 */
void
remote_event_signal(REMOTE_EVENT_T *event)
{
	wmb();

	event->fired = 1;

	dsb(sy); /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
234
235 int
236 vchiq_copy_from_user(void *dst, const void *src, int size)
237 {
238 if ((unsigned long)src < TASK_SIZE) {
239 return copy_from_user(dst, src, size);
240 } else {
241 memcpy(dst, src, size);
242 return 0;
243 }
244 }
245
246 VCHIQ_STATUS_T
247 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
248 void *offset, int size, int dir)
249 {
250 struct vchiq_pagelist_info *pagelistinfo;
251
252 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
253
254 pagelistinfo = create_pagelist((char __user *)offset, size,
255 (dir == VCHIQ_BULK_RECEIVE)
256 ? PAGELIST_READ
257 : PAGELIST_WRITE,
258 current);
259
260 if (!pagelistinfo)
261 return VCHIQ_ERROR;
262
263 bulk->handle = memhandle;
264 bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
265
266 /*
267 * Store the pagelistinfo address in remote_data,
268 * which isn't used by the slave.
269 */
270 bulk->remote_data = pagelistinfo;
271
272 return VCHIQ_SUCCESS;
273 }
274
275 void
276 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
277 {
278 if (bulk && bulk->remote_data && bulk->actual)
279 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
280 bulk->actual);
281 }
282
/* Master-side bulk transfer hook — never valid on the ARM (slave) side. */
void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}
292
/* Append a one-line platform description to the state dump. */
void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len = snprintf(buf, sizeof(buf),
			   " Platform: 2835 (VC master)");

	vchiq_dump(dump_context, buf, len + 1);
}
302
/* Suspend is not supported on this platform; always reports failure. */
VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}
308
/* Resume is a no-op on this platform; always reports success. */
VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}
314
/* No platform-specific action is required on pause. */
void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}
319
/* No platform-specific action is required on resume. */
void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}
324
/* Autosuspend is not supported: the VideoCore is always wanted. */
int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
{
	return 1; // autosuspend not supported - videocore always wanted
}
330
/* The suspend timer is never used on this platform. */
int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}
/* Report (via the log) that the suspend timer is unused. */
void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}
/* Suspend-timer timeout handler — nothing to do since the timer is unused. */
void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}
346 /*
347 * Local functions
348 */
349
350 static irqreturn_t
351 vchiq_doorbell_irq(int irq, void *dev_id)
352 {
353 VCHIQ_STATE_T *state = dev_id;
354 irqreturn_t ret = IRQ_NONE;
355 unsigned int status;
356
357 /* Read (and clear) the doorbell */
358 status = readl(g_regs + BELL0);
359
360 if (status & 0x4) { /* Was the doorbell rung? */
361 remote_event_pollall(state);
362 ret = IRQ_HANDLED;
363 }
364
365 return ret;
366 }
367
/* Tear down a pagelistinfo: unmap the scatterlist if still mapped,
 * release any pages pinned by get_user_pages(), then free the single
 * coherent buffer holding the pagelist, page array, scatterlist and the
 * pagelistinfo itself.  The unmap must happen before the free since the
 * scatterlist lives inside the freed buffer.
 */
static void
cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < pagelistinfo->num_pages; i++)
			put_page(pagelistinfo->pages[i]);
	}

	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
386
387 /* There is a potential problem with partial cache lines (pages?)
388 ** at the ends of the block when reading. If the CPU accessed anything in
389 ** the same line (page?) then it may have pulled old data into the cache,
390 ** obscuring the new data underneath. We can solve this by transferring the
391 ** partial cache lines separately, and allowing the ARM to copy into the
392 ** cached area.
393 */
394
/* Build the pagelist for a bulk transfer.
 *
 * Makes one coherent allocation containing, in order: the PAGELIST_T,
 * the bus-address array, the page-pointer array, the scatterlist and a
 * trailing vchiq_pagelist_info.  Pins the buffer's pages (vmalloc pages
 * are looked up directly and not pinned), maps them for DMA, and merges
 * adjacent DMA blocks into single pagelist entries.  For reads whose
 * ends are not cache-line aligned, a fragment buffer is claimed and the
 * pagelist type encodes its index (see free_pagelist for the copy-back).
 *
 * Returns the pagelistinfo, or NULL on allocation / pinning / mapping
 * failure or an interrupted wait for a free fragment.
 */
static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct task_struct *task)
{
	PAGELIST_T *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	pagelist_size = sizeof(PAGELIST_T) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist = dma_zalloc_coherent(g_dev,
				       pagelist_size,
				       &dma_addr,
				       GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
			pagelist);
	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its sub-arrays. */
	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
			(scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr(buf)) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg = vmalloc_to_page(buf + (actual_pages *
								 PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		down_read(&task->mm->mmap_sem);
		/* Pin the user pages; reads need FOLL_WRITE because the
		 * device writes into them.
		 */
		actual_pages = get_user_pages(
				  (unsigned long)buf & ~(PAGE_SIZE - 1),
				  num_pages,
				  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
				  pages,
				  NULL /*vmas */);
		up_read(&task->mm->mmap_sem);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "create_pagelist - only %d/%d pages locked",
				       actual_pages,
				       num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0)
			{
				actual_pages--;
				put_page(pages[actual_pages]);
			}
			cleaup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)
		sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleaup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects the block to be page
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(len & ~PAGE_MASK);
		WARN_ON(addr & ~PAGE_MASK);
		/* Extend the previous entry if this block is contiguous
		 * with it; otherwise start a new packed entry.
		 */
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) |
		     ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
		    == addr) {
			addrs[k - 1] += (len >> PAGE_SHIFT);
		} else {
			addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
		}
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			cleaup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(g_free_fragments == NULL);

		/* Pop a fragment block off the free list. */
		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type so
		 * free_pagelist() can find the same block.
		 */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
569
/* Finish a bulk transfer described by a pagelistinfo.
 *
 * Unmaps the scatterlist (which must happen before the CPU touches the
 * data), copies head/tail fragment bytes back into the first/last pages
 * for unaligned reads, returns the fragment block to the free list,
 * marks the pages dirty for read transfers, and frees everything via
 * cleaup_pagelistinfo().
 *
 * "actual" is the number of bytes actually transferred; fragment
 * copy-back is skipped unless actual >= 0.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	unsigned int i;
	PAGELIST_T *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
			pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* Recover the fragment block whose index was encoded into
		 * the pagelist type by create_pagelist().
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the leading partial cache line into the first page. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)page_address(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
		}
		/* Copy the trailing partial cache line into the last page. */
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)page_address(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
		}

		/* Push the fragment block back onto the free list. */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleaup_pagelistinfo(pagelistinfo);
}