/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#include "vchiq_arm.h"
#include "vchiq_connected.h"
#include "vchiq_killable.h"
#include "vchiq_pagelist.h"

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0	0x00
#define BELL2	0x08

typedef struct vchiq_2835_state_struct {
	int inited;
	VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;

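/*
 * Bookkeeping for one bulk transfer: the PAGELIST_T handed to the VPU, the
 * coherent buffer it lives in, the pinned pages and scatterlist used to
 * build it, and the flags that cleanup_pagelistinfo() consults when
 * releasing everything again.
 */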
struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

extern int vchiq_arm_log_level;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct task_struct *task);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual);

int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	err = of_property_read_u32(dev->of_node, "cache-line-size",
				   &g_cache_line_size);

	if (err) {
		dev_err(dev, "Missing cache-line-size property\n");
		return -ENODEV;
	}

	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	g_free_fragments = g_fragments_base;
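	/*
	 * Thread the fragment buffers into a singly-linked free list: the
	 * first sizeof(char *) bytes of each free fragment hold a pointer to
	 * the next one, terminated by NULL. g_free_fragments_sema counts how
	 * many are available and g_free_fragments_mutex guards the list head.
	 */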
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
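	/*
	 * The firmware is expected to overwrite channelbase with zero on
	 * success, which is why a non-zero value left behind is treated as
	 * a failure below.
	 */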
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}

VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;

	state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
	((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
	status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
	if (status != VCHIQ_SUCCESS)
	{
		((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
	}
	return status;
}

VCHIQ_ARM_STATE_T*
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
	if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
	{
		BUG();
	}
	return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
}

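/*
 * Signal an event to the VPU: publish the ->fired flag (with barriers so
 * the write is visible before the interrupt), then ring doorbell 2 if the
 * remote side has armed the event.
 */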
void
remote_event_signal(REMOTE_EVENT_T *event)
{
	wmb();

	event->fired = 1;

	dsb(sy); /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
			void *offset, int size, int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);

	pagelistinfo = create_pagelist((char __user *)offset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE,
				       current);

	if (!pagelistinfo)
		return VCHIQ_ERROR;

	bulk->handle = memhandle;
	bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return VCHIQ_SUCCESS;
}

void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}

void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       " Platform: 2835 (VC master)");
	vchiq_dump(dump_context, buf, len + 1);
}

VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}

VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}

void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}

void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}

int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	return 1; // autosuspend not supported - videocore always wanted
}

int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}
void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}
void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}
/*
 * Local functions
 */

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	VCHIQ_STATE_T *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & 0x4) {  /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < pagelistinfo->num_pages; i++)
			put_page(pagelistinfo->pages[i]);
	}

	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

/* There is a potential problem with partial cache lines (pages?)
** at the ends of the block when reading. If the CPU accessed anything in
** the same line (page?) then it may have pulled old data into the cache,
** obscuring the new data underneath. We can solve this by transferring the
** partial cache lines separately, and allowing the ARM to copy into the
** cached area.
*/
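
/*
 * For example, assuming a 32-byte cache line: a bulk read into a buffer at
 * page offset 10 has a 22-byte "head" (and possibly a short tail) that does
 * not fill a whole cache line. Those bytes are received into one of the
 * fragment buffers carved out after the slots, and free_pagelist() later
 * memcpy()s them into place from the first and second halves of that
 * fragment buffer respectively.
 */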

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct task_struct *task)
{
	PAGELIST_T *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

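	/*
	 * A single coherent allocation holds, in order: the PAGELIST_T
	 * header with its addrs[] array, the struct page pointer array,
	 * the scatterlist, and finally the struct vchiq_pagelist_info
	 * describing the whole thing (see the pointer carving below).
	 */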
	pagelist_size = sizeof(PAGELIST_T) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist = dma_zalloc_coherent(g_dev,
				       pagelist_size,
				       &dma_addr,
				       GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
			pagelist);
	if (!pagelist)
		return NULL;

	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr(buf)) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg = vmalloc_to_page(buf + (actual_pages *
								 PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		down_read(&task->mm->mmap_sem);
		actual_pages = get_user_pages(
				(unsigned long)buf & PAGE_MASK,
				num_pages,
				(type == PAGELIST_READ) ? FOLL_WRITE : 0,
				pages,
				NULL /*vmas */);
		up_read(&task->mm->mmap_sem);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "create_pagelist - only %d/%d pages locked",
				       actual_pages,
				       num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0)
			{
				actual_pages--;
				put_page(pages[actual_pages]);
			}
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
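		/*
		 * For example (assuming 4 KiB pages), a contiguous run of
		 * three pages at bus address 0x10000 is encoded as
		 * 0x10000 | (3 - 1) = 0x10002.
		 */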
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
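		/*
		 * Encode the index of the claimed fragment buffer in the
		 * pagelist type itself; free_pagelist() reverses this
		 * calculation to find the fragment again, and the firmware
		 * presumably uses the same index within the fragment area it
		 * was told about at init time.
		 */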
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	PAGELIST_T *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
			pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
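	/*
	 * For a read (DMA_FROM_DEVICE) the unmap is what synchronizes the
	 * CPU's view of the pages with what the device wrote.
	 */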
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

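		/*
		 * Worked example, assuming g_cache_line_size == 32: with
		 * offset 10 and actual 100, head_bytes = (32 - 10) & 31 = 22
		 * and tail_bytes = (10 + 100) & 31 = 14, so 22 bytes are
		 * copied from the first half of the fragment buffer and 14
		 * from the second half.
		 */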
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)page_address(pages[0]) +
				pagelist->offset,
			       fragments,
			       head_bytes);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)page_address(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
			       fragments + g_cache_line_size,
			       tail_bytes);
		}

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}