/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

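/*
 * vb2_dc_get_contiguous_size() - length of the initial contiguous run of a
 * DMA-mapped scatterlist
 *
 * Walks the mapped entries and stops at the first segment whose DMA address
 * does not immediately follow the end of the previous one. For example,
 * segments mapped at 0x1000/len 0x1000, 0x2000/len 0x1000 and
 * 0x8000/len 0x1000 yield 0x2000: the third segment breaks the run. Callers
 * compare the result against the buffer size to verify that the whole
 * buffer is contiguous in the DMA address space.
 */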
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

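/*
 * The "cookie" of a dma-contig buffer is a pointer to its DMA address.
 * Drivers normally do not call this memop directly; they read the address
 * through the vb2_dma_contig_plane_dma_addr() helper from
 * <media/videobuf2-dma-contig.h>, which dereferences the returned pointer.
 */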
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as was assumed in earlier versions of
 * videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
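
/*
 * Typical usage (illustrative sketch; the vb2_queue setup shown is the
 * standard videobuf2 pattern, and ctx/pdev are hypothetical driver names,
 * not code from this file): a driver selects this allocator by pointing
 * its queue's mem_ops at vb2_dma_contig_memops before vb2_queue_init():
 *
 *	struct vb2_queue *q = &ctx->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 * q->dev is the device all DMA allocations and mappings are made against;
 * it is the dev argument passed to the alloc/get_userptr/attach_dmabuf
 * memops above.
 */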

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
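
/*
 * Typical pairing (illustrative sketch; foo_probe()/foo_remove() are
 * hypothetical driver callbacks, not part of this file): a driver on an
 * IOMMU-backed platform raises the segment size limit before creating
 * its vb2 queue and releases the allocated dma_parms on removal:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		// ... set up the vb2 queue ...
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */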

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");