/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/pgtable.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#include "vchiq_arm.h"
#include "vchiq_connected.h"
#include "vchiq_killable.h"
#include "vchiq_pagelist.h"

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1

#define BELL0 0x00
#define BELL2 0x08

typedef struct vchiq_2835_state_struct {
	int inited;
	VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;

struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
static unsigned int g_cache_line_size = CACHE_LINE_SIZE;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

extern int vchiq_arm_log_level;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct task_struct *task);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual);

int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	err = of_property_read_u32(dev->of_node, "cache-line-size",
				   &g_cache_line_size);

	if (err) {
		dev_err(dev, "Missing cache-line-size property\n");
		return -ENODEV;
	}

	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

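	/* Publish the fragment region (bus address and count) in slot zero */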
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;
	slot_mem_size += frag_mem_size;

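	/*
	 * Build the free-fragment list: the first word of each free
	 * fragment points to the next one, with g_free_fragments as the
	 * list head and g_free_fragments_sema counting the available
	 * fragments.
	 */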
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}

VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	VCHIQ_2835_ARM_STATE_T *platform_state;

	platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!platform_state)
		return VCHIQ_ERROR;

	state->platform_state = platform_state;
	platform_state->inited = 1;
	status = vchiq_arm_init_state(state, &platform_state->arm_state);
	if (status != VCHIQ_SUCCESS)
		platform_state->inited = 0;

	return status;
}

VCHIQ_ARM_STATE_T*
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
	if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
		BUG();
	return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
}

void
remote_event_signal(REMOTE_EVENT_T *event)
{
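	/* Order writes to shared slot memory before setting the fired flag */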
	wmb();

	event->fired = 1;

	dsb(sy); /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
			void *offset, int size, int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);

	pagelistinfo = create_pagelist((char __user *)offset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE,
				       current);

	if (!pagelistinfo)
		return VCHIQ_ERROR;

	bulk->handle = memhandle;
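	/* The pagelist's bus address is what the remote side receives as
	 * the bulk data pointer.
	 */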
	bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return VCHIQ_SUCCESS;
}

void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}

void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       " Platform: 2835 (VC master)");
	vchiq_dump(dump_context, buf, len + 1);
}

VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}

VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}

void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}

void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}

int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	return 1; /* autosuspend not supported - videocore always wanted */
}

int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}

void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}

void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}

/*
 * Local functions
 */

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	VCHIQ_STATE_T *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & 0x4) { /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void
cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < pagelistinfo->num_pages; i++)
			put_page(pagelistinfo->pages[i]);
	}

	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct task_struct *task)
{
	PAGELIST_T *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	pagelist_size = sizeof(PAGELIST_T) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
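	/*
	 * The single coherent allocation is carved up below into the
	 * PAGELIST_T header with its addrs[] array, the struct page *
	 * array, the scatterlist, and the struct vchiq_pagelist_info
	 * bookkeeping structure, in that order.
	 */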
	pagelist = dma_zalloc_coherent(g_dev,
				       pagelist_size,
				       &dma_addr,
				       GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
			pagelist);
	if (!pagelist)
		return NULL;

	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr(buf)) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg = vmalloc_to_page(buf + (actual_pages *
								 PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		down_read(&task->mm->mmap_sem);
		actual_pages = get_user_pages(
				(unsigned long)buf & ~(PAGE_SIZE - 1),
				num_pages,
				(type == PAGELIST_READ) ? FOLL_WRITE : 0,
				pages,
				NULL /*vmas */);
		up_read(&task->mm->mmap_sem);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "create_pagelist - only %d/%d pages locked",
				       actual_pages,
				       num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0) {
				actual_pages--;
				put_page(pages[actual_pages]);
			}
			cleaup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleaup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
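		/*
		 * If this block starts on the page immediately after the end
		 * of the previous packed entry, extend that entry's page
		 * count; otherwise start a new (bus address | pages - 1)
		 * entry.
		 */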
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			cleaup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
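		/*
		 * Encode the index of the claimed fragment pair in the
		 * pagelist type; free_pagelist() recovers the fragment
		 * address from this index.
		 */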
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	unsigned int i;
	PAGELIST_T *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
			pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
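	/*
	 * The head and tail partial cache lines of a misaligned read arrive
	 * via the fragment pair rather than through the pagelist pages, so
	 * copy them into the first and last pages here, then return the
	 * fragment pair to the free list.
	 */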
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)page_address(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)page_address(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
		}

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleaup_pagelistinfo(pagelistinfo);
}