/*
 * Retrieved from the git.proxmox.com mirror of mirror_ubuntu-artful-kernel.git,
 * blob: drivers/media/video/videobuf-dma-contig.c
 * (at commit "include cleanup: Update gfp.h and slab.h includes to prepare
 * for breaking implicit ...").
 */
1 /*
2 * helper functions for physically contiguous capture buffers
3 *
4 * The functions support hardware lacking scatter gather support
5 * (i.e. the buffers must be linear in physical memory)
6 *
7 * Copyright (c) 2008 Magnus Damm
8 *
9 * Based on videobuf-vmalloc.c,
10 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2
15 */
16
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/mm.h>
20 #include <linux/pagemap.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/sched.h>
23 #include <linux/slab.h>
24 #include <media/videobuf-dma-contig.h>
25
/* Per-buffer private state for the videobuf-dma-contig allocator. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* MAGIC_DC_MEM, sanity-checked on every access */
	void *vaddr;		/* kernel virtual address from dma_alloc_coherent();
				 * NULL when nothing is currently allocated */
	dma_addr_t dma_handle;	/* DMA/bus address of the buffer (for USERPTR,
				 * derived from the first pfn plus page offset) */
	unsigned long size;	/* page-aligned buffer size in bytes */
	int is_userptr;		/* non-zero when tracking a user-space mapping */
};
33
#define MAGIC_DC_MEM 0x0733ac61

/*
 * Sanity-check the magic cookie before trusting a
 * videobuf_dma_contig_memory pointer; BUG() on mismatch.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies (the original bare
 * if-block form is a classic dangling-else hazard).
 */
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			pr_err("magic mismatch: %x expected %x\n",	\
			       (is), (should));				\
			BUG();						\
		}							\
	} while (0)
40
41 static void
42 videobuf_vm_open(struct vm_area_struct *vma)
43 {
44 struct videobuf_mapping *map = vma->vm_private_data;
45
46 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
47 map, map->count, vma->vm_start, vma->vm_end);
48
49 map->count++;
50 }
51
/*
 * VMA close() handler: drop one reference on the mapping.  When the
 * last reference goes away, cancel any active streaming and release
 * the coherent DMA memory of every buffer attached to this mapping.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			/* only touch buffers that belong to this mapping */
			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				/* mark the buffer as no longer allocated */
				mem->vaddr = NULL;
			}

			/* detach the buffer from the dying mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		/* map is freed under vb_lock; the VMA that referenced it
		 * is going away, so no further callbacks can see it */
		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}
109
/* VMA callbacks installed by __videobuf_mmap_mapper(); open/close keep
 * the mapping's reference count in sync with the userspace VMAs. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
114
115 /**
116 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
117 * @mem: per-buffer private videobuf-dma-contig data
118 *
119 * This function resets the user space pointer
120 */
121 static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
122 {
123 mem->is_userptr = 0;
124 mem->dma_handle = 0;
125 mem->size = 0;
126 }
127
128 /**
129 * videobuf_dma_contig_user_get() - setup user space memory pointer
130 * @mem: per-buffer private videobuf-dma-contig data
131 * @vb: video buffer to map
132 *
133 * This function validates and sets up a pointer to user space memory.
134 * Only physically contiguous pfn-mapped memory is accepted.
135 *
136 * Returns 0 if successful.
137 */
138 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
139 struct videobuf_buffer *vb)
140 {
141 struct mm_struct *mm = current->mm;
142 struct vm_area_struct *vma;
143 unsigned long prev_pfn, this_pfn;
144 unsigned long pages_done, user_address;
145 unsigned int offset;
146 int ret;
147
148 offset = vb->baddr & ~PAGE_MASK;
149 mem->size = PAGE_ALIGN(vb->size + offset);
150 mem->is_userptr = 0;
151 ret = -EINVAL;
152
153 down_read(&mm->mmap_sem);
154
155 vma = find_vma(mm, vb->baddr);
156 if (!vma)
157 goto out_up;
158
159 if ((vb->baddr + mem->size) > vma->vm_end)
160 goto out_up;
161
162 pages_done = 0;
163 prev_pfn = 0; /* kill warning */
164 user_address = vb->baddr;
165
166 while (pages_done < (mem->size >> PAGE_SHIFT)) {
167 ret = follow_pfn(vma, user_address, &this_pfn);
168 if (ret)
169 break;
170
171 if (pages_done == 0)
172 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
173 else if (this_pfn != (prev_pfn + 1))
174 ret = -EFAULT;
175
176 if (ret)
177 break;
178
179 prev_pfn = this_pfn;
180 user_address += PAGE_SIZE;
181 pages_done++;
182 }
183
184 if (!ret)
185 mem->is_userptr = 1;
186
187 out_up:
188 up_read(&current->mm->mmap_sem);
189
190 return ret;
191 }
192
193 static void *__videobuf_alloc(size_t size)
194 {
195 struct videobuf_dma_contig_memory *mem;
196 struct videobuf_buffer *vb;
197
198 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
199 if (vb) {
200 mem = vb->priv = ((char *)vb) + size;
201 mem->magic = MAGIC_DC_MEM;
202 }
203
204 return vb;
205 }
206
207 static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
208 {
209 struct videobuf_dma_contig_memory *mem = buf->priv;
210
211 BUG_ON(!mem);
212 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
213
214 return mem->vaddr;
215 }
216
217 static int __videobuf_iolock(struct videobuf_queue *q,
218 struct videobuf_buffer *vb,
219 struct v4l2_framebuffer *fbuf)
220 {
221 struct videobuf_dma_contig_memory *mem = vb->priv;
222
223 BUG_ON(!mem);
224 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
225
226 switch (vb->memory) {
227 case V4L2_MEMORY_MMAP:
228 dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
229
230 /* All handling should be done by __videobuf_mmap_mapper() */
231 if (!mem->vaddr) {
232 dev_err(q->dev, "memory is not alloced/mmapped.\n");
233 return -EINVAL;
234 }
235 break;
236 case V4L2_MEMORY_USERPTR:
237 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
238
239 /* handle pointer from user space */
240 if (vb->baddr)
241 return videobuf_dma_contig_user_get(mem, vb);
242
243 /* allocate memory for the read() method */
244 mem->size = PAGE_ALIGN(vb->size);
245 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
246 &mem->dma_handle, GFP_KERNEL);
247 if (!mem->vaddr) {
248 dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
249 mem->size);
250 return -ENOMEM;
251 }
252
253 dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
254 mem->vaddr, mem->size);
255 break;
256 case V4L2_MEMORY_OVERLAY:
257 default:
258 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
259 __func__);
260 return -EINVAL;
261 }
262
263 return 0;
264 }
265
266 static int __videobuf_mmap_free(struct videobuf_queue *q)
267 {
268 unsigned int i;
269
270 dev_dbg(q->dev, "%s\n", __func__);
271 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
272 if (q->bufs[i] && q->bufs[i]->map)
273 return -EBUSY;
274 }
275
276 return 0;
277 }
278
279 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
280 struct vm_area_struct *vma)
281 {
282 struct videobuf_dma_contig_memory *mem;
283 struct videobuf_mapping *map;
284 unsigned int first;
285 int retval;
286 unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
287
288 dev_dbg(q->dev, "%s\n", __func__);
289 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
290 return -EINVAL;
291
292 /* look for first buffer to map */
293 for (first = 0; first < VIDEO_MAX_FRAME; first++) {
294 if (!q->bufs[first])
295 continue;
296
297 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
298 continue;
299 if (q->bufs[first]->boff == offset)
300 break;
301 }
302 if (VIDEO_MAX_FRAME == first) {
303 dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
304 offset);
305 return -EINVAL;
306 }
307
308 /* create mapping + update buffer list */
309 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
310 if (!map)
311 return -ENOMEM;
312
313 q->bufs[first]->map = map;
314 map->start = vma->vm_start;
315 map->end = vma->vm_end;
316 map->q = q;
317
318 q->bufs[first]->baddr = vma->vm_start;
319
320 mem = q->bufs[first]->priv;
321 BUG_ON(!mem);
322 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
323
324 mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
325 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
326 &mem->dma_handle, GFP_KERNEL);
327 if (!mem->vaddr) {
328 dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
329 mem->size);
330 goto error;
331 }
332 dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
333 mem->vaddr, mem->size);
334
335 /* Try to remap memory */
336
337 size = vma->vm_end - vma->vm_start;
338 size = (size < mem->size) ? size : mem->size;
339
340 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
341 retval = remap_pfn_range(vma, vma->vm_start,
342 mem->dma_handle >> PAGE_SHIFT,
343 size, vma->vm_page_prot);
344 if (retval) {
345 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
346 dma_free_coherent(q->dev, mem->size,
347 mem->vaddr, mem->dma_handle);
348 goto error;
349 }
350
351 vma->vm_ops = &videobuf_vm_ops;
352 vma->vm_flags |= VM_DONTEXPAND;
353 vma->vm_private_data = map;
354
355 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
356 map, q, vma->vm_start, vma->vm_end,
357 (long int) q->bufs[first]->bsize,
358 vma->vm_pgoff, first);
359
360 videobuf_vm_open(vma);
361
362 return 0;
363
364 error:
365 kfree(map);
366 return -ENOMEM;
367 }
368
/*
 * Copy up to @count bytes of the current read buffer, starting at
 * q->read_off, to user space.
 *
 * Returns the number of bytes copied, or -EFAULT when the user copy
 * fails.  NOTE(review): returns int while @count is size_t — assumes
 * reads stay below INT_MAX (true for frame-sized buffers); confirm if
 * this backend is ever used with larger buffers.
 */
static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   char __user *data, size_t count,
				   int nonblocking)
{
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
	void *vaddr;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	BUG_ON(!mem->vaddr);

	/* copy to userspace */
	/* clamp to the bytes remaining in the read buffer; assumes
	 * read_off <= size (maintained by the videobuf core) */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;

	vaddr = mem->vaddr;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}
391
/*
 * Stream-read variant of the copy-to-user path.  Optionally patches a
 * frame counter into the tail of the buffer (VBI compatibility), then
 * delegates to __videobuf_copy_to_user().
 *
 * Returns the number of bytes copied, or -EFAULT when the very first
 * chunk (pos == 0) already failed.
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	/* -EFAULT round-trips through size_t here; the comparison still
	 * matches because both sides convert to the same value */
	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
421
/* Backend operation table handed to the videobuf core by
 * videobuf_queue_dma_contig_init(). */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,

	.alloc = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.mmap_free = __videobuf_mmap_free,
	.mmap_mapper = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream = __videobuf_copy_stream,
	.vmalloc = __videobuf_to_vmalloc,
};
433
/**
 * videobuf_queue_dma_contig_init() - initialize a queue for contiguous DMA
 * @q: videobuf queue to initialize
 * @ops: driver-supplied queue operations
 * @dev: device used for DMA allocations and logging
 * @irqlock: spinlock protecting the driver's buffer lists
 * @type: V4L2 buffer type served by this queue
 * @field: default field setting
 * @msize: size of the driver's buffer structure
 * @priv: driver private data stored in the queue
 *
 * Thin wrapper: forwards everything to videobuf_queue_core_init()
 * together with this backend's qops table.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
447
/**
 * videobuf_to_dma_contig() - get the DMA address of a buffer
 * @buf: videobuf buffer managed by this backend
 *
 * Returns the dma_handle recorded at allocation/validation time;
 * BUG()s if the buffer does not carry this backend's private data.
 */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
458
459 void videobuf_dma_contig_free(struct videobuf_queue *q,
460 struct videobuf_buffer *buf)
461 {
462 struct videobuf_dma_contig_memory *mem = buf->priv;
463
464 /* mmapped memory can't be freed here, otherwise mmapped region
465 would be released, while still needed. In this case, the memory
466 release should happen inside videobuf_vm_close().
467 So, it should free memory only if the memory were allocated for
468 read() operation.
469 */
470 if (buf->memory != V4L2_MEMORY_USERPTR)
471 return;
472
473 if (!mem)
474 return;
475
476 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
477
478 /* handle user space pointer case */
479 if (buf->baddr) {
480 videobuf_dma_contig_user_put(mem);
481 return;
482 }
483
484 /* read() method */
485 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
486 mem->vaddr = NULL;
487 }
488 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
489
/* Module metadata. */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");