/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device           *dev;
        void                    *vaddr;
        unsigned long           size;
        void                    *cookie;
        dma_addr_t              dma_addr;
        struct dma_attrs        attrs;
        enum dma_data_direction dma_dir;
        struct sg_table         *dma_sgt;
        struct frame_vector     *vec;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        atomic_t                refcount;
        struct sg_table         *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

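/*
 * Return the number of bytes at the start of the mapped scatterlist that
 * are contiguous in the DMA address space, i.e. the size of the initial
 * DMA-contiguous chunk.
 */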
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

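/*
 * The buffer's "cookie" is a pointer to its DMA address. Drivers normally
 * retrieve it through the vb2_dma_contig_plane_dma_addr() helper from
 * media/videobuf2-dma-contig.h.
 */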
static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       &buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, const struct dma_attrs *attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (attrs)
                buf->attrs = *attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                      GFP_KERNEL | gfp_flags, &buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                             buf->dma_addr, buf->size, &buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
                                    struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
                                     struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
                                    struct sg_table *sgt,
                                    enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                  struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach         = vb2_dc_dmabuf_ops_attach,
        .detach         = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf    = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf  = vb2_dc_dmabuf_ops_unmap,
        .kmap           = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic    = vb2_dc_dmabuf_ops_kmap,
        .vmap           = vb2_dc_dmabuf_ops_vmap,
        .mmap           = vb2_dc_dmabuf_ops_mmap,
        .release        = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                                    buf->size, &buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, &attrs);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (as has been assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                                        offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                       contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, &attrs);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin an unattached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                                  unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
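
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * creates one allocator context per DMA-capable device and plugs
 * vb2_dma_contig_memops into its vb2_queue. The priv/pdev names below are
 * assumptions; in this kernel version the context is handed back to vb2
 * from the driver's queue_setup operation:
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *	priv->queue.mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(&priv->queue);
 *
 * and on driver removal:
 *
 *	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */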

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue. A usage sketch follows
 * vb2_dma_contig_clear_max_seg_size() below.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
        kfree(dev->dma_parms);
        dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
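
/*
 * Usage sketch (illustrative, not part of the original file): the set/clear
 * pair is meant to bracket the device's lifetime. A hypothetical driver on
 * an IOMMU-backed platform would do something like:
 *
 *	probe:
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *
 *	remove:
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 */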

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");